import logging
from typing import Any, Dict, List
from decimal import Decimal
import flask
from flask import Blueprint, Response, render_template, request,\
url_for, redirect
from flask_babel import gettext, lazy_gettext
from flask_login import login_required, current_user
from waitlist.base import db
from waitlist.permissions import perm_manager
from waitlist.storage.database import ShipCheckCollection, WaitlistGroup, ShipCheck,\
InvType, InvGroup, MarketGroup
from waitlist.blueprints.settings import add_menu_entry
from waitlist.utility.constants import check_types
bp = Blueprint('ship_assignment', __name__)
logger = logging.getLogger(__name__)
perm_manager.define_permission('ship_assignment_edit')
@bp.route('/', methods=['GET'])
@login_required
@perm_manager.require('ship_assignment_edit')
def ship_assignments():
checks = db.session.query(ShipCheckCollection).all()
wl_groups = db.session.query(WaitlistGroup).all()
    # TODO: this page still needs forms for adding and removing a collection;
    # it only exposes editing for now
return render_template('settings/ship_assignment/collection_list.html',
checks=checks, groups=wl_groups)
@bp.route('/col/edit', methods=['GET'])
@login_required
@perm_manager.require('ship_assignment_edit')
def collection_edit() -> Response:
cid: int = int(request.args.get('collection_id'))
coll: ShipCheckCollection = db.session.query(ShipCheckCollection).get(cid)
    groups: List[WaitlistGroup] = db.session.query(WaitlistGroup).all()
    return render_template('settings/ship_assignment/collection_edit.html',
                           coll=coll, groups=groups,
                           check_type_map=check_types.CHECK_NAME_MAP)
@bp.route('/col/<int:coll_id>/change', methods=['POST'])
@login_required
@perm_manager.require('ship_assignment_edit')
def collection_change(coll_id: int) -> Response:
coll_name = request.form['coll_name']
    group_id = int(request.form['group_id'])
    target_id = int(request.form['target_id'])
tag = request.form['tag']
collection: ShipCheckCollection = db.session.query(ShipCheckCollection).get(coll_id)
if collection is None:
flask.flash('Invalid Collection ID provided', 'danger')
return redirect(url_for('.ship_assignments'))
collection.checkCollectionName = coll_name
collection.waitlistGroupID = group_id
collection.defaultTargetID = target_id
collection.defaultTag = tag
db.session.commit()
flask.flash('Collection was updated', 'success')
return redirect(url_for('.collection_edit', collection_id=coll_id))
@bp.route('/col/add', methods=['POST'])
@login_required
@perm_manager.require('ship_assignment_edit')
def collection_add():
name: str = request.form['name']
wl_group_id = int(request.form['group_id'])
def_target_id = int(request.form['target_id'])
tag = request.form['tag']
collection = ShipCheckCollection(
checkCollectionName=name,
waitlistGroupID=wl_group_id,
defaultTargetID=def_target_id,
defaultTag=tag
)
db.session.add(collection)
db.session.commit()
return redirect(url_for('.ship_assignments'))
@bp.route('/col/remove', methods=['POST'])
@login_required
@perm_manager.require('ship_assignment_edit')
def collection_remove():
    cid: int = int(request.form['collection_id'])
db.session.query(ShipCheckCollection).filter(ShipCheckCollection.checkCollectionID == cid).delete()
db.session.commit()
return redirect(url_for('.ship_assignments'))
def get_id_type(check_type: int) -> Any:
type_mapping: Dict[int, Any] = {
check_types.SHIP_CHECK_TYPEID: InvType,
check_types.SHIP_CHECK_INVGROUP: InvGroup,
check_types.SHIP_CHECK_MARKETGROUP: MarketGroup,
check_types.MODULE_CHECK_TYPEID: InvType,
check_types.MODULE_CHECK_MARKETGROUP: MarketGroup
}
return type_mapping[check_type]
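# Usage sketch: resolve the ORM class for a check type and fetch one of its rows;
# the id value used below is purely hypothetical.
#   id_type = get_id_type(check_types.SHIP_CHECK_TYPEID)   # -> InvType
#   obj = db.session.query(id_type).get(587)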
@bp.route('/col/<int:coll_id>/checks', methods=['POST'])
@login_required
@perm_manager.require('ship_assignment_edit')
def check_add(coll_id: int) -> Response:
collection: ShipCheckCollection = db.session.query(ShipCheckCollection).get(coll_id)
name: str = request.form['check_name']
check_id: int = int(request.form['check_type'], 10)
target: int = int(request.form['check_target'], 10)
order: int = int(request.form['order'], 10)
modifier: Decimal = Decimal(request.form['modifier'])
    check_ids = [int(i.strip()) for i in request.form['ids'].split(',')]
tag = request.form['tag']
check: ShipCheck = ShipCheck(
checkName=name,
collectionID=coll_id,
checkTargetID=target,
checkType=check_id,
order=order,
modifier=modifier,
checkTag=tag
)
target_type = get_id_type(check_id)
for obj_id in check_ids:
obj = db.session.query(target_type).get(obj_id)
check.ids.append(obj)
# we only have restrictions for specific types of checks
if check_id in [check_types.MODULE_CHECK_MARKETGROUP, check_types.MODULE_CHECK_TYPEID]:
rest_typeids = [int(i.strip()) for i in request.form['rest_typeids'].split(',')]
rest_invgroupids = [int(i.strip()) for i in request.form['rest_invgroupids'].split(',')]
rest_mgroupids = [int(i.strip()) for i in request.form['rest_mgroupids'].split(',')]
for type_id in rest_typeids:
invtype = db.session.query(InvType).get(type_id)
check.check_rest_types.append(invtype)
for group_id in rest_invgroupids:
check.check_rest_groups.append(
db.session.query(InvGroup).get(group_id)
)
for mgroup_id in rest_mgroupids:
check.check_rest_market_groups.append(
db.session.query(MarketGroup).get(mgroup_id)
)
db.session.add(check)
db.session.commit()
return redirect(url_for('.collection_edit', collection_id=coll_id))
@bp.route('/check/<int:check_id>/edit', methods=['GET'])
@login_required
@perm_manager.require('ship_assignment_edit')
def check_edit(check_id: int) -> Response:
check: ShipCheck = db.session.query(ShipCheck).get(check_id)
return render_template('settings/ship_assignment/check_edit.html', check=check,
check_type_map=check_types.CHECK_NAME_MAP,
waitlists=check.collection.waitlistGroup.waitlists)
@bp.route('/check/<int:check_id>/', methods=['POST'])
@login_required
@perm_manager.require('ship_assignment_edit')
def check_change(check_id: int) -> Response:
name: str = request.form['check_name']
order: int = int(request.form['order'])
target_id: int = int(request.form['check_target'])
check_modifier: Decimal = Decimal(request.form['modifier'])
check_type: int = int(request.form['check_type'])
    check_ids = [int(i.strip()) for i in request.form['ids'].split(',')]
tag = request.form['tag']
check: ShipCheck = db.session.query(ShipCheck).get(check_id)
# clear old items
# this needs to be done before changing the type
# because otherwise we delete from the wrong relationship
    check.ids = []
check.order = order
check.checkTargetID = target_id
check.checkType = check_type
check.checkName = name
check.modifier = check_modifier
check.checkTag = tag
# add new items
# this needs to be done after changing the type
# because otherwise we add to the wrong relationship
target_type = get_id_type(check_type)
for obj_id in check_ids:
obj = db.session.query(target_type).get(obj_id)
check.ids.append(obj)
# we only have restrictions for specific types of checks
if check_type in [check_types.MODULE_CHECK_MARKETGROUP, check_types.MODULE_CHECK_TYPEID]:
logger.debug("Adding restrictions to check")
rest_typeids = [int(i.strip()) for i in request.form['rest_typeids'].split(',') if i.strip()]
rest_invgroupids = [int(i.strip()) for i in request.form['rest_invgroupids'].split(',') if i.strip()]
rest_mgroupids = [int(i.strip()) for i in request.form['rest_mgroupids'].split(',') if i.strip()]
logger.debug("types: %r invgroups: %r mgroups: %r", rest_typeids, rest_invgroupids, rest_mgroupids)
check.check_rest_types = []
check.check_rest_groups = []
check.check_rest_market_groups = []
for type_id in rest_typeids:
invtype = db.session.query(InvType).get(type_id)
check.check_rest_types.append(invtype)
for group_id in rest_invgroupids:
check.check_rest_groups.append(
db.session.query(InvGroup).get(group_id)
)
for mgroup_id in rest_mgroupids:
check.check_rest_market_groups.append(
db.session.query(MarketGroup).get(mgroup_id)
)
db.session.commit()
return redirect(url_for('.collection_edit', collection_id=check.collection.checkCollectionID))
@bp.route('/check/delete', methods=['POST'])
@login_required
@perm_manager.require('ship_assignment_edit')
def check_delete():
check_id = int(request.form['check_id'])
check = db.session.query(ShipCheck).get(check_id)
db.session.delete(check)
db.session.commit()
return redirect(request.referrer)
add_menu_entry('ship_assignment.ship_assignments', lazy_gettext('Ship Classification'), perm_manager.get_permission('ship_assignment_edit').can)
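# Registration sketch (the module path and url_prefix shown here are assumptions,
# not necessarily what the application actually uses):
#   from waitlist.blueprints.settings.ship_assignment import bp as ship_assignment_bp
#   app.register_blueprint(ship_assignment_bp, url_prefix='/settings/ship_assignment')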
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs prediction on a trained model."""
import argparse
import datetime
import os
import shutil
import sys
import tempfile
from tensorflow.python.lib.io import file_io
import apache_beam as beam
from apache_beam.transforms import window
from apache_beam.utils.windowed_value import WindowedValue
def parse_arguments(argv):
"""Parse command line arguments.
Args:
argv: includes the script's name.
Returns:
argparse object
"""
parser = argparse.ArgumentParser(
description='Runs Prediction inside a beam or Dataflow job.')
# cloud options
parser.add_argument('--project-id',
help='The project to which the job will be submitted.')
parser.add_argument('--cloud',
action='store_true',
help='Run preprocessing on the cloud.')
parser.add_argument('--job-name',
default=('mltoolbox-batch-prediction-' +
datetime.datetime.now().strftime('%Y%m%d%H%M%S')),
help='Dataflow job name. Must be unique over all jobs.')
parser.add_argument('--extra-package',
default=[],
action='append',
help=('If using --cloud, also installs these packages on '
'each dataflow worker'))
# I/O args
parser.add_argument('--predict-data',
required=True,
help='Data to run prediction on')
parser.add_argument('--trained-model-dir',
required=True,
help='Usually train_output_path/model.')
parser.add_argument('--output-dir',
required=True,
help=('Location to save output.'))
# Other args
parser.add_argument('--batch-size',
required=False,
default=1000,
type=int,
                      help=('Batch size. Larger values consume more memory '
                            'but take less time to finish.'))
parser.add_argument('--shard-files',
dest='shard_files',
action='store_true',
help='Shard files')
parser.add_argument('--no-shard-files',
dest='shard_files',
action='store_false',
help='Don\'t shard files')
parser.set_defaults(shard_files=True)
parser.add_argument('--output-format',
choices=['csv', 'json'],
default='csv',
help="""
      The output format.
      json: produces a newline-delimited file where each line is JSON. No
      post processing is performed and the output matches what the trained
      model produces.
      csv: produces a csv file without a header row and a header csv file.
      For classification problems, the vector of probabilities for each
      target class is split into individual csv columns.""")
args, _ = parser.parse_known_args(args=argv[1:])
if args.cloud:
if not args.project_id:
raise ValueError('--project-id needed with --cloud')
if not args.trained_model_dir.startswith('gs://'):
      raise ValueError('--trained-model-dir needs to be a GCS path.')
if not args.output_dir.startswith('gs://'):
raise ValueError('--output-dir needs to be a GCS path.')
if not args.predict_data.startswith('gs://'):
raise ValueError('--predict-data needs to be a GCS path.')
return args
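# Example invocation (a sketch; the bucket paths and project id are hypothetical):
#   args = parse_arguments(['predict.py',
#                           '--predict-data', 'gs://my-bucket/input.csv',
#                           '--trained-model-dir', 'gs://my-bucket/training/model',
#                           '--output-dir', 'gs://my-bucket/predictions',
#                           '--cloud', '--project-id', 'my-project'])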
class EmitAsBatchDoFn(beam.DoFn):
"""A DoFn that buffers the records and emits them batch by batch."""
def __init__(self, batch_size):
"""Constructor of EmitAsBatchDoFn beam.DoFn class.
Args:
batch_size: the max size we want to buffer the records before emitting.
"""
self._batch_size = batch_size
self._cached = []
def process(self, element):
self._cached.append(element)
if len(self._cached) >= self._batch_size:
emit = self._cached
self._cached = []
yield emit
def finish_bundle(self, element=None):
if len(self._cached) > 0: # pylint: disable=g-explicit-length-test
yield WindowedValue(self._cached, -1, [window.GlobalWindow()])
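# Behavior sketch (assuming batch_size=2): full batches are emitted from process()
# and any remainder is flushed in finish_bundle(), e.g. 'a','b','c' -> ['a','b'], ['c'].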
class RunGraphDoFn(beam.DoFn):
"""A DoFn for running the TF graph."""
def __init__(self, trained_model_dir):
self._trained_model_dir = trained_model_dir
self._session = None
def start_bundle(self, element=None):
from tensorflow.python.saved_model import tag_constants
from tensorflow.contrib.session_bundle import bundle_shim
self._session, meta_graph = bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
self._trained_model_dir, tags=[tag_constants.SERVING])
signature = meta_graph.signature_def['serving_default']
# get the mappings between aliases and tensor names
# for both inputs and outputs
self._input_alias_map = {friendly_name: tensor_info_proto.name
for (friendly_name, tensor_info_proto) in signature.inputs.items()}
self._output_alias_map = {friendly_name: tensor_info_proto.name
for (friendly_name, tensor_info_proto) in signature.outputs.items()}
self._aliases, self._tensor_names = zip(*self._output_alias_map.items())
def finish_bundle(self, element=None):
self._session.close()
def process(self, element):
"""Run batch prediciton on a TF graph.
Args:
element: list of strings, representing one batch input to the TF graph.
"""
import collections
import apache_beam as beam
num_in_batch = 0
try:
assert self._session is not None
feed_dict = collections.defaultdict(list)
for line in element:
# Remove trailing newline.
if line.endswith('\n'):
line = line[:-1]
        feed_dict[list(self._input_alias_map.values())[0]].append(line)
num_in_batch += 1
# batch_result is list of numpy arrays with batch_size many rows.
batch_result = self._session.run(fetches=self._tensor_names,
feed_dict=feed_dict)
# ex batch_result for batch_size > 1:
# (array([value1, value2, ..., value_batch_size]),
# array([[a1, b1, c1]], ..., [a_batch_size, b_batch_size, c_batch_size]]),
# ...)
# ex batch_result for batch_size == 1:
# (value,
# array([a1, b1, c1]),
# ...)
# Convert the results into a dict and unbatch the results.
if num_in_batch > 1:
for result in zip(*batch_result):
predictions = {}
for name, value in zip(self._aliases, result):
predictions[name] = (value.tolist() if getattr(value, 'tolist', None) else value)
yield predictions
else:
predictions = {}
for i in range(len(self._aliases)):
value = batch_result[i]
value = (value.tolist() if getattr(value, 'tolist', None)
else value)
predictions[self._aliases[i]] = value
yield predictions
except Exception as e: # pylint: disable=broad-except
yield beam.pvalue.SideOutputValue('errors',
(str(e), element))
class RawJsonCoder(beam.coders.Coder):
"""Coder for json newline files."""
def encode(self, obj):
"""Encodes a python object into a JSON string.
Args:
obj: python object.
Returns:
JSON string.
"""
import json
return json.dumps(obj, separators=(',', ': '))
class CSVCoder(beam.coders.Coder):
"""Coder for CSV files containing the output of prediction."""
def __init__(self, header):
"""Sets the headers in the csv file.
Args:
header: list of strings that correspond to keys in the predictions dict.
"""
self._header = header
def make_header_string(self):
return ','.join(self._header)
def encode(self, tf_graph_predictions):
"""Encodes the graph json prediction into csv.
Args:
tf_graph_predictions: python dict.
Returns:
csv string.
"""
row = []
for col in self._header:
row.append(str(tf_graph_predictions[col]))
return ','.join(row)
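# Usage sketch: the header fixes the column order regardless of dict ordering.
#   coder = CSVCoder(['key', 'score'])
#   coder.make_header_string()               # -> 'key,score'
#   coder.encode({'score': 0.9, 'key': 7})   # -> '7,0.9'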
class FormatAndSave(beam.PTransform):
def __init__(self, args):
self._shard_name_template = None if args.shard_files else ''
self._output_format = args.output_format
self._output_dir = args.output_dir
# Get the BQ schema if csv.
if self._output_format == 'csv':
from tensorflow.python.saved_model import tag_constants
from tensorflow.contrib.session_bundle import bundle_shim
from tensorflow.core.framework import types_pb2
session, meta_graph = bundle_shim.load_session_bundle_or_saved_model_bundle_from_path(
args.trained_model_dir, tags=[tag_constants.SERVING])
signature = meta_graph.signature_def['serving_default']
self._schema = []
for friendly_name in sorted(signature.outputs):
tensor_info_proto = signature.outputs[friendly_name]
# TODO(brandondutra): Could dtype be DT_INVALID?
# Consider getting the dtype from the graph via
# session.graph.get_tensor_by_name(tensor_info_proto.name).dtype)
dtype = tensor_info_proto.dtype
if dtype == types_pb2.DT_FLOAT or dtype == types_pb2.DT_DOUBLE:
bq_type = 'FLOAT'
elif dtype == types_pb2.DT_INT32 or dtype == types_pb2.DT_INT64:
bq_type = 'INTEGER'
else:
bq_type = 'STRING'
self._schema.append({'mode': 'NULLABLE',
'name': friendly_name,
'type': bq_type})
session.close()
def apply(self, datasets):
return self.expand(datasets)
def expand(self, datasets):
import json
tf_graph_predictions, errors = datasets
if self._output_format == 'json':
(tf_graph_predictions |
'Write Raw JSON' >>
beam.io.textio.WriteToText(os.path.join(self._output_dir, 'predictions'),
file_name_suffix='.json',
coder=RawJsonCoder(),
shard_name_template=self._shard_name_template))
elif self._output_format == 'csv':
# make a csv header file
header = [col['name'] for col in self._schema]
csv_coder = CSVCoder(header)
(tf_graph_predictions.pipeline |
'Make CSV Header' >>
beam.Create([json.dumps(self._schema, indent=2)]) |
'Write CSV Schema File' >>
beam.io.textio.WriteToText(os.path.join(self._output_dir, 'csv_schema'),
file_name_suffix='.json',
shard_name_template=''))
# Write the csv predictions
(tf_graph_predictions |
'Write CSV' >>
beam.io.textio.WriteToText(os.path.join(self._output_dir, 'predictions'),
file_name_suffix='.csv',
coder=csv_coder,
shard_name_template=self._shard_name_template))
else:
      raise ValueError('FormatAndSave: unknown format %s' % self._output_format)
# Write the errors to a text file.
(errors |
'Write Errors' >>
beam.io.textio.WriteToText(os.path.join(self._output_dir, 'errors'),
file_name_suffix='.txt',
shard_name_template=self._shard_name_template))
def make_prediction_pipeline(pipeline, args):
"""Builds the prediction pipeline.
  Reads the csv files, prepends a ',' if the target column is missing, runs
  prediction, and then writes the formatted results to a file.
Args:
pipeline: the pipeline
args: command line args
"""
# DF bug: DF does not work with unicode strings
predicted_values, errors = (
pipeline |
'Read CSV Files' >>
beam.io.ReadFromText(str(args.predict_data),
strip_trailing_newlines=True) |
'Batch Input' >>
beam.ParDo(EmitAsBatchDoFn(args.batch_size)) |
'Run TF Graph on Batches' >>
beam.ParDo(RunGraphDoFn(args.trained_model_dir)).with_outputs('errors', main='main'))
((predicted_values, errors) |
'Format and Save' >>
FormatAndSave(args))
def main(argv=None):
args = parse_arguments(sys.argv if argv is None else argv)
if args.cloud:
tmpdir = tempfile.mkdtemp()
try:
local_packages = [os.path.join(tmpdir, os.path.basename(p)) for p in args.extra_package]
for source, dest in zip(args.extra_package, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(args.output_dir, 'tmp', 'staging'),
'job_name': args.job_name,
'project': args.project_id,
'no_save_main_session': True,
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
# Or use BlockingDataflowPipelineRunner
p = beam.Pipeline('DataflowRunner', options=opts)
make_prediction_pipeline(p, args)
print(('Dataflow Job submitted, see Job %s at '
'https://console.developers.google.com/dataflow?project=%s') %
(options['job_name'], args.project_id))
sys.stdout.flush()
runner_results = p.run()
finally:
shutil.rmtree(tmpdir)
else:
p = beam.Pipeline('DirectRunner')
make_prediction_pipeline(p, args)
runner_results = p.run()
return runner_results
if __name__ == '__main__':
runner_results = main()
runner_results.wait_until_finish()
from __future__ import print_function
import errno
import json
import os
import platform
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import traceback
from pynailgun import NailgunConnection, NailgunException
from timing import monotonic_time_nanos
from tracing import Tracing
from subprocutils import check_output, CalledProcessError
MAX_BUCKD_RUN_COUNT = 64
BUCKD_CLIENT_TIMEOUT_MILLIS = 60000
GC_MAX_PAUSE_TARGET = 15000
JAVA_MAX_HEAP_SIZE_MB = 1000
# Describes a resource used by this driver.
# - name: logical name of the resource
# - executable: whether the resource should/needs execute permissions
# - basename: required basename of the resource
class Resource(object):
def __init__(self, name, executable=False, basename=None):
self.name = name
self.executable = executable
self.basename = name if basename is None else basename
# Resources that get propagated to buck via system properties.
EXPORTED_RESOURCES = [
Resource("testrunner_classes"),
Resource("abi_processor_classes"),
Resource("path_to_asm_jar"),
Resource("logging_config_file"),
Resource("path_to_pathlib_py", basename='pathlib.py'),
Resource("path_to_intellij_py"),
Resource("path_to_pex"),
Resource("path_to_pywatchman"),
Resource("path_to_sh_binary_template"),
Resource("jacoco_agent_jar"),
Resource("report_generator_jar"),
Resource("path_to_static_content"),
Resource("path_to_pex", executable=True),
Resource("dx"),
Resource("android_agent_path"),
Resource("native_exopackage_fake_path"),
]
class RestartBuck(Exception):
pass
class BuckToolException(Exception):
pass
class BuckTool(object):
def __init__(self, buck_project):
self._buck_project = buck_project
self._tmp_dir = self._platform_path(buck_project.tmp_dir)
self._pathsep = os.pathsep
if (sys.platform == 'cygwin'):
self._pathsep = ';'
# Check whether the given resource exists.
def _has_resource(self, resource):
raise NotImplementedError()
# Return an on-disk path to the given resource. This may cause
# implementations to unpack the resource at this point.
def _get_resource(self, resource):
raise NotImplementedError()
def _use_buckd(self):
return not os.environ.get('NO_BUCKD')
def _environ_for_buck(self):
env = os.environ.copy()
env['CLASSPATH'] = self._get_bootstrap_classpath()
env['BUCK_CLASSPATH'] = self._get_java_classpath()
# Buck overwrites these variables for a few purposes.
# Pass them through with their original values for
# tests that need them.
for f in ('TEMPDIR', 'TEMP', 'TMPDIR', 'TMP'):
orig_value = env.get(f)
if orig_value is not None:
env['BUCK_ORIG_' + f] = orig_value
return env
def launch_buck(self, build_id):
with Tracing('BuckRepo.launch_buck'):
self.kill_autobuild()
if 'clean' in sys.argv:
self.kill_buckd()
buck_version_uid = self._get_buck_version_uid()
use_buckd = self._use_buckd()
has_watchman = bool(which('watchman'))
if use_buckd and has_watchman:
buckd_run_count = self._buck_project.get_buckd_run_count()
running_version = self._buck_project.get_running_buckd_version()
new_buckd_run_count = buckd_run_count + 1
if (buckd_run_count == MAX_BUCKD_RUN_COUNT or
running_version != buck_version_uid):
self.kill_buckd()
new_buckd_run_count = 0
if new_buckd_run_count == 0 or not self._is_buckd_running():
self.launch_buckd(buck_version_uid=buck_version_uid)
else:
self._buck_project.update_buckd_run_count(new_buckd_run_count)
elif use_buckd and not has_watchman:
print("Not using buckd because watchman isn't installed.",
file=sys.stderr)
elif not use_buckd:
print("Not using buckd because NO_BUCKD is set.",
file=sys.stderr)
env = self._environ_for_buck()
env['BUCK_BUILD_ID'] = build_id
buck_socket_path = self._buck_project.get_buckd_socket_path()
if use_buckd and self._is_buckd_running() and \
os.path.exists(buck_socket_path):
with Tracing('buck', args={'command': sys.argv[1:]}):
with NailgunConnection('local:.buckd/sock', cwd=self._buck_project.root) as c:
exit_code = c.send_command(
'com.facebook.buck.cli.Main',
sys.argv[1:],
env=env,
cwd=self._buck_project.root)
if exit_code == 2:
print('Daemon is busy, please wait',
'or run "buck kill" to terminate it.',
file=sys.stderr)
return exit_code
command = ["buck"]
extra_default_options = [
"-Djava.io.tmpdir={0}".format(self._tmp_dir)
]
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.facebook.buck.cli.Main")
command.extend(sys.argv[1:])
return subprocess.call(command,
cwd=self._buck_project.root,
env=env,
executable=which("java"))
def launch_buckd(self, buck_version_uid=None):
with Tracing('BuckRepo.launch_buckd'):
self._setup_watchman_watch()
if buck_version_uid is None:
buck_version_uid = self._get_buck_version_uid()
# Override self._tmp_dir to a long lived directory.
buckd_tmp_dir = self._buck_project.create_buckd_tmp_dir()
ngserver_output_path = os.path.join(buckd_tmp_dir, 'ngserver-out')
'''
Use SoftRefLRUPolicyMSPerMB for immediate GC of javac output.
Set timeout to 60s (longer than the biggest GC pause seen for a 2GB
heap) and GC target to 15s. This means that the GC has to miss its
target by 100% or many 500ms heartbeats must be missed before a client
disconnection occurs. Specify port 0 to allow Nailgun to find an
available port, then parse the port number out of the first log entry.
'''
command = ["buckd"]
extra_default_options = [
"-Dbuck.buckd_launch_time_nanos={0}".format(monotonic_time_nanos()),
"-XX:MaxGCPauseMillis={0}".format(GC_MAX_PAUSE_TARGET),
"-XX:SoftRefLRUPolicyMSPerMB=0",
# Stop Java waking up every 50ms to collect thread
# statistics; doing it once every five seconds is much
# saner for a long-lived daemon.
"-XX:PerfDataSamplingInterval=5000",
# Likewise, waking up once per second just in case
# there's some rebalancing to be done is silly.
"-XX:+UnlockDiagnosticVMOptions",
"-XX:GuaranteedSafepointInterval=5000",
"-Djava.io.tmpdir={0}".format(buckd_tmp_dir),
"-Dcom.martiansoftware.nailgun.NGServer.outputPath={0}".format(
ngserver_output_path),
]
if is_java8():
extra_default_options.extend([
"-XX:+UseG1GC",
"-XX:MaxHeapFreeRatio=40",
])
command.extend(self._get_java_args(buck_version_uid, extra_default_options))
command.append("com.facebook.buck.cli.bootstrapper.ClassLoaderBootstrapper")
command.append("com.martiansoftware.nailgun.NGServer")
command.append("local:.buckd/sock")
command.append("{0}".format(BUCKD_CLIENT_TIMEOUT_MILLIS))
'''
Change the process group of the child buckd process so that when this
script is interrupted, it does not kill buckd.
'''
def preexec_func():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
# N.B. preexec_func is POSIX-only, and any reasonable
# POSIX system has a /dev/null
os.setpgrp()
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 1)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
buck_socket_path = self._buck_project.get_buckd_socket_path()
# Make sure the Unix domain socket doesn't exist before this call.
try:
os.unlink(buck_socket_path)
            except OSError as e:
if e.errno == errno.ENOENT:
# Socket didn't previously exist.
pass
else:
raise e
process = subprocess.Popen(
command,
executable=which("java"),
cwd=self._buck_project.root,
close_fds=True,
preexec_fn=preexec_func,
env=self._environ_for_buck())
self._buck_project.save_buckd_version(buck_version_uid)
self._buck_project.update_buckd_run_count(0)
# Give Java some time to create the listening socket.
for i in range(0, 100):
if not os.path.exists(buck_socket_path):
time.sleep(0.01)
returncode = process.poll()
# If the process hasn't exited yet, everything is working as expected
if returncode is None:
return 0
return returncode
def kill_autobuild(self):
autobuild_pid = self._buck_project.get_autobuild_pid()
if autobuild_pid:
if autobuild_pid.isdigit():
try:
                    os.kill(int(autobuild_pid), signal.SIGTERM)
except OSError:
pass
def kill_buckd(self):
with Tracing('BuckRepo.kill_buckd'):
buckd_socket_path = self._buck_project.get_buckd_socket_path()
if os.path.exists(buckd_socket_path):
print("Shutting down nailgun server...", file=sys.stderr)
try:
with NailgunConnection('local:.buckd/sock', cwd=self._buck_project.root) as c:
c.send_command('ng-stop')
except NailgunException as e:
if e.code not in (NailgunException.CONNECT_FAILED,
NailgunException.CONNECTION_BROKEN,
NailgunException.UNEXPECTED_CHUNKTYPE):
raise BuckToolException(
'Unexpected error shutting down nailgun server: ' +
str(e))
self._buck_project.clean_up_buckd()
def _setup_watchman_watch(self):
with Tracing('BuckRepo._setup_watchman_watch'):
if not which('watchman'):
message = textwrap.dedent("""\
Watchman not found, please install when using buckd.
See https://github.com/facebook/watchman for details.""")
if sys.platform == "darwin":
message += "\n(brew install watchman on OS X)"
# Bail if watchman isn't installed as we know java's
# FileSystemWatcher will take too long to process events.
raise BuckToolException(message)
print("Using watchman.", file=sys.stderr)
def _is_buckd_running(self):
with Tracing('BuckRepo._is_buckd_running'):
buckd_socket_path = self._buck_project.get_buckd_socket_path()
if not os.path.exists(buckd_socket_path):
return False
try:
with NailgunConnection(
'local:.buckd/sock',
stdin=None,
stdout=None,
stderr=None,
cwd=self._buck_project.root) as c:
c.send_command('ng-stats')
except NailgunException as e:
if e.code == NailgunException.CONNECT_FAILED:
return False
else:
raise
return True
def _get_buck_version_uid(self):
raise NotImplementedError()
def _get_bootstrap_classpath(self):
raise NotImplementedError()
def _get_java_classpath(self):
raise NotImplementedError()
def _get_extra_java_args(self):
return []
def _get_java_args(self, version_uid, extra_default_options=[]):
java_args = [] if is_java8() else ["-XX:MaxPermSize=256m"]
java_args.extend([
"-Xmx{0}m".format(JAVA_MAX_HEAP_SIZE_MB),
"-Djava.awt.headless=true",
"-Djava.util.logging.config.class=com.facebook.buck.cli.bootstrapper.LogConfig",
"-Dbuck.test_util_no_tests_dir=true",
"-Dbuck.version_uid={0}".format(version_uid),
"-Dbuck.buckd_dir={0}".format(self._buck_project.buckd_dir),
"-Dorg.eclipse.jetty.util.log.class=org.eclipse.jetty.util.log.JavaUtilLog",
])
for resource in EXPORTED_RESOURCES:
if self._has_resource(resource):
java_args.append(
"-Dbuck.{0}={1}".format(
resource.name, self._get_resource(resource)))
if sys.platform == "darwin":
java_args.append("-Dbuck.enable_objc=true")
java_args.append("-Djava.library.path=" + os.path.dirname(
self._get_resource(
Resource("libjcocoa.dylib"))))
if os.environ.get("BUCK_DEBUG_MODE"):
java_args.append("-agentlib:jdwp=transport=dt_socket,"
"server=y,suspend=y,address=8888")
if os.environ.get("BUCK_DEBUG_SOY"):
java_args.append("-Dbuck.soy.debug=true")
java_args.extend(extra_default_options)
if self._buck_project.buck_javaargs:
java_args.extend(shlex.split(self._buck_project.buck_javaargs))
if self._buck_project.buck_javaargs_local:
java_args.extend(shlex.split(self._buck_project.buck_javaargs_local))
java_args.extend(self._get_extra_java_args())
extra_java_args = os.environ.get("BUCK_EXTRA_JAVA_ARGS")
if extra_java_args:
java_args.extend(shlex.split(extra_java_args))
return java_args
def _platform_path(self, path):
if sys.platform != 'cygwin':
return path
return subprocess.check_output(['cygpath', '-w', path]).strip()
#
# an almost exact copy of the shutil.which() implementation from python3.4
#
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to
# the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
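# Usage sketch: resolve the java binary from PATH before launching buck/buckd.
#   java_path = which('java')
#   if java_path is None:
#       raise BuckToolException('java not found on PATH')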
_java8 = None
def is_java8():
global _java8
if _java8 is not None:
return _java8
try:
cmd = ['java', '-Xms64m', '-version']
output = check_output(cmd, stderr=subprocess.STDOUT)
version_line = output.strip().splitlines()[0]
        m = re.compile(r'(openjdk|java) version "1\.8\..*').match(version_line)
_java8 = bool(m)
return _java8
except CalledProcessError as e:
print(e.output, file=sys.stderr)
raise e
def install_signal_handlers():
if os.name == 'posix':
signal.signal(
signal.SIGUSR1,
lambda sig, frame: traceback.print_stack(frame))
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for gitdeps.py.
This test requires git to be in the path, and requires internet connectivity.
It runs tests against a repository hosted on github.com.
"""
import gitdeps
import itertools
import json
import logging
import os
import subprocess
import sys
import tempfile
import unittest
_LOGGER = logging.getLogger(os.path.basename(__file__))
def _Shell(*cmd, **kw):
"""Runs |cmd|, returning the results from Popen(cmd).communicate(). Additional
keyword arguments are passed on to subprocess.Popen.
"""
_LOGGER.debug('Executing %s.', cmd)
kw['shell'] = True
kw.setdefault('stdout', subprocess.PIPE)
kw.setdefault('stderr', subprocess.PIPE)
prog = subprocess.Popen(cmd, **kw)
stdout, stderr = prog.communicate()
if prog.returncode != 0:
raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))
return (stdout, stderr)
class ScopedTempDir(object):
"""A class that creates a temporary directory that dies when it does."""
def __init__(self):
"""Creates the temporary directory and initializes |path|."""
self.path = tempfile.mkdtemp(prefix='gitdeps_test_')
def __del__(self):
"""Destroys the temporary directory."""
_Shell('rmdir', '/S', '/Q', self.path)
def _CountChildDirectories(path):
"""Returns the number of child directories there are in the given |path|."""
for dummy_root, dirs, dummy_files in os.walk(path):
return len(dirs)
def _WriteDeps(deps, path):
"""Writes the provided |deps| to the given |path|."""
with open(path, 'wb') as io:
io.write('deps = ')
io.write(json.dumps(deps, indent=2))
io.write('\n')
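# Sketch of the file _WriteDeps produces for a single entry (values borrowed from
# the test fixtures below; layout comes from json.dumps(..., indent=2)):
#   deps = {
#     "repo": [
#       "https://github.com/chhamilton/test_repo.git",
#       [],
#       "rev2"
#     ]
#   }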
class TestGitDeps(unittest.TestCase):
"""Unittests for the gitdeps script."""
def setUp(self):
"""Runs before every test in this fixture."""
self._temp_dir = None
self._dummy_repo_path = 'https://github.com/chhamilton/test_repo.git'
def temp_dir(self):
if self._temp_dir is None:
self._temp_dir = ScopedTempDir()
return self._temp_dir.path
def tearDown(self):
# This will lose the last reference to the temp directory and cause it to
# be torn down.
self._temp_dir = None
def _BuildTestRepoPaths(self):
"""Sets up the various paths for checking out the test repo."""
# pylint: disable=W0201
self._cache_dir = os.path.join(self.temp_dir(), 'cache_dir')
self._output_dir = os.path.join(self.temp_dir(), 'output_dir')
self._checkout_dir_rel = 'repo'
self._junctions_path = os.path.join(self._cache_dir, '.gitdeps_junctions')
self._checkout_dir_abs = os.path.join(self._output_dir,
self._checkout_dir_rel)
self._script_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'gitdeps.py'))
def _RunScript(self, cache_dir=None, output_dir=None, deps_paths=None,
verbose=False, cwd=None, stdout=None, stderr=None):
"""Runs the gitdeps.py script with the provided arguments. If |cache_dir|
is not specified then defaults to self._cache_dir. If the other arguments
are not specified then they are left unspecified on the command line.
"""
if deps_paths is None:
deps_paths = []
cmd = [sys.executable,
self._script_path,
'--cache-dir=%s' % (cache_dir if cache_dir else self._cache_dir)]
if output_dir:
cmd.append('--output-dir=%s' % output_dir)
if verbose:
cmd.append('--verbose')
cmd += deps_paths
stdo, stde = _Shell(*cmd, cwd=cwd, stdout=stdout, stderr=stderr)
return (stdo, stde)
def _TestSuccessBasicCheckout(self,
create_cache_dir,
specify_output_dir,
specify_deps_file,
pull_full_repo):
self._BuildTestRepoPaths()
# Determine 'cwd' and 'output_dir' parameters.
output_dir = None
cwd = self._output_dir
if specify_output_dir:
output_dir = self._output_dir
cwd = None
# Determine the deps file path.
deps_path = os.path.join(self.temp_dir(), 'gitdeps.txt')
deps_paths = [deps_path]
if not specify_deps_file:
if not cwd:
cwd = self.temp_dir()
deps_path = os.path.join(cwd, 'GITDEPS')
deps_paths = []
# Create the directories.
if create_cache_dir:
os.mkdir(self._cache_dir)
os.mkdir(self._output_dir)
directories_to_checkout = [] if pull_full_repo else ['foo']
# Create and write the deps file.
deps = {
self._checkout_dir_rel:
(self._dummy_repo_path, directories_to_checkout, 'rev2')
}
_WriteDeps(deps, deps_path)
# Run the script.
self._RunScript(output_dir=output_dir,
deps_paths=deps_paths,
verbose=True,
cwd=cwd)
# Ensure the checkout was created as expected.
self.assertTrue(os.path.isdir(self._cache_dir))
self.assertEqual(1, _CountChildDirectories(self._cache_dir))
self.assertTrue(os.path.isfile(self._junctions_path))
self.assertTrue(os.path.isdir(self._checkout_dir_abs))
# pylint: disable=W0212
if pull_full_repo:
self.assertNotEqual(None,
gitdeps._GetJunctionInfo(self._checkout_dir_abs))
else:
for directory in directories_to_checkout:
junction = os.path.join(self._checkout_dir_abs, directory)
self.assertNotEqual(None, gitdeps._GetJunctionInfo(junction))
def _TestSuccessCheckoutReuse(self, reuse_refspec):
"""A test that checks reuse of a cached repository by checking out a
second time with a different refspec.
"""
self._TestSuccessBasicCheckout(True, True, True, True)
# Create and write the deps file.
deps_path = os.path.join(self.temp_dir(), 'gitdeps.txt')
deps = {
self._checkout_dir_rel: (self._dummy_repo_path, [], reuse_refspec)
}
_WriteDeps(deps, deps_path)
# Run the script.
self._RunScript(output_dir=self._output_dir,
deps_paths=[deps_path],
verbose=True)
# Ensure the checkout was created as expected.
self.assertTrue(os.path.isdir(self._cache_dir))
self.assertEqual(1, _CountChildDirectories(self._cache_dir))
self.assertTrue(os.path.isfile(self._junctions_path))
self.assertTrue(os.path.isdir(self._checkout_dir_abs))
# pylint: disable=W0212
self.assertNotEqual(None, gitdeps._GetJunctionInfo(self._checkout_dir_abs))
def testCheckoutReuseForwards(self):
"""Tests that repository reuse is okay when moving to a child reference."""
self._TestSuccessCheckoutReuse('master')
def testCheckoutReuseBackwards(self):
"""Tests that repository reuse is okay when moving to a parent reference."""
self._TestSuccessCheckoutReuse('rev1')
def testMultipleAndRemovedCheckouts(self):
"""Tests that multiple repository checkouts works, as well as removal of
orphaned checkouts due to removal from the deps file.
"""
self._BuildTestRepoPaths()
os.mkdir(self._cache_dir)
os.mkdir(self._output_dir)
checkout_dir_rel2 = os.path.join('repo2', 'nested')
checkout_dir_abs2 = os.path.join(self._output_dir, checkout_dir_rel2)
# Create and write the deps file.
deps_path = os.path.join(self.temp_dir(), 'gitdeps.txt')
deps = {
self._checkout_dir_rel: (self._dummy_repo_path, [], 'rev2'),
checkout_dir_rel2: (self._dummy_repo_path, [], 'rev3')
}
_WriteDeps(deps, deps_path)
# Run the script.
self._RunScript(output_dir=self._output_dir,
deps_paths=[deps_path],
verbose=True)
# Ensure the checkout was created as expected.
self.assertTrue(os.path.isdir(self._cache_dir))
self.assertEqual(2, _CountChildDirectories(self._cache_dir))
self.assertTrue(os.path.isfile(self._junctions_path))
self.assertTrue(os.path.isdir(self._checkout_dir_abs))
# pylint: disable=W0212
self.assertNotEqual(None, gitdeps._GetJunctionInfo(self._checkout_dir_abs))
self.assertTrue(os.path.isdir(checkout_dir_abs2))
# pylint: disable=W0212
self.assertNotEqual(None, gitdeps._GetJunctionInfo(checkout_dir_abs2))
# Rewrite the deps file, removing the nested junction.
deps = {
self._checkout_dir_rel: (self._dummy_repo_path, [], 'rev2'),
}
_WriteDeps(deps, deps_path)
# Run the script.
self._RunScript(output_dir=self._output_dir,
deps_paths=[deps_path],
verbose=True)
# Ensure the checkout was created as expected.
self.assertTrue(os.path.isdir(self._cache_dir))
self.assertEqual(1, _CountChildDirectories(self._cache_dir))
self.assertTrue(os.path.isfile(self._junctions_path))
self.assertTrue(os.path.isdir(self._checkout_dir_abs))
self.assertNotEqual(None, gitdeps._GetJunctionInfo(self._checkout_dir_abs))
# repo2/nested shouldn't exist, but neither should repo2 (as the directory
# is empty and should have been removed).
self.assertFalse(os.path.exists(checkout_dir_abs2))
self.assertFalse(os.path.exists(os.path.dirname(checkout_dir_abs2)))
def generateParameterizedTests():
for combination in itertools.product([True, False], repeat=4):
create_cache_dir, specify_output_dir, specify_deps_file, pull_full_repo = (
combination)
testName = 'testSuccess'
testName += 'EmptyCacheDir' if create_cache_dir else 'NoCacheDir'
testName += 'EmptyOutputDir' if specify_output_dir else 'NoOutputDir'
testName += 'SpecifiedDeps' if specify_deps_file else 'ImplicitDeps'
testName += 'PullFullRepo' if pull_full_repo else 'UseSparseCheckout'
    # Bind the current combination via a default argument; otherwise every
    # generated test would capture the values from the last loop iteration.
    setattr(TestGitDeps, testName,
            lambda self, combo=combination: self._TestSuccessBasicCheckout(*combo))
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
generateParameterizedTests()
unittest.main()
import sys, os, glob
import re
import subprocess
import numpy as np
from astropy.table import Table, vstack
from desiutil.log import get_logger
import desispec.io
from desispec.workflow.exptable import get_exposure_table_pathname
from desispec.workflow import batch
from desispec.util import parse_int_args
def parse(options=None):
import argparse
p = argparse.ArgumentParser()
p.add_argument("-n", "--night", type=int, nargs='+', help="YEARMMDD nights")
p.add_argument("-t", "--tileid", type=int, help="Tile ID")
p.add_argument("-e", "--expid", type=int, nargs='+', help="exposure IDs")
p.add_argument("-s", "--spectrographs", type=str,
help="spectrographs to include, e.g. 0-4,9; includes final number in range")
p.add_argument("-g", "--group", type=str, required=True,
help="cumulative, pernight, perexp, or a custom name")
p.add_argument("--run_zmtl", action="store_true",
help="also run make_zmtl_files")
p.add_argument("--explist", type=str,
help="file with columns TILE NIGHT EXPID to use")
p.add_argument("--nosubmit", action="store_true",
help="generate scripts but don't submit batch jobs")
p.add_argument("--noafterburners", action="store_true",
help="Do not run afterburners (like QSO fits)")
p.add_argument("--batch-queue", type=str, default='realtime',
help="batch queue name")
p.add_argument("--batch-reservation", type=str,
help="batch reservation name")
p.add_argument("--batch-dependency", type=str,
help="job dependencies passed to sbatch --dependency")
p.add_argument("--system-name", type=str,
help="batch system name, e.g. cori-haswell, cori-knl, perlmutter-gpu")
# TODO
# p.add_argument("--outdir", type=str, help="output directory")
# p.add_argument("--scriptdir", type=str, help="script directory")
# p.add_argument("--per-exposure", action="store_true",
# help="fit redshifts per exposure instead of grouping")
if options is None:
args = p.parse_args()
else:
args = p.parse_args(options)
return args
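# Example (a sketch with hypothetical values): build args for one cumulative tile job.
#   args = parse(['--tileid', '1234', '--night', '20210510', '--group', 'cumulative'])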
def main(args):
batch_scripts, failed_jobs = generate_tile_redshift_scripts(**args.__dict__)
num_error = len(failed_jobs)
sys.exit(num_error)
def get_tile_redshift_relpath(tileid,group,night=None,expid=None):
"""
Determine the relative output directory of the tile redshift batch script for spectra+coadd+redshifts for a tile
Args:
tileid (int): Tile ID
group (str): cumulative, pernight, perexp, or a custom name
night (int): Night
expid (int): Exposure ID
Returns:
        outdir (str): output directory of the batch script, relative to specprod/run/scripts
"""
log = get_logger()
# - output directory relative to reduxdir
if group == 'cumulative':
outdir = f'tiles/{group}/{tileid}/{night}'
elif group == 'pernight':
outdir = f'tiles/{group}/{tileid}/{night}'
elif group == 'perexp':
outdir = f'tiles/{group}/{tileid}/{expid:08d}'
elif group == 'pernight-v0':
outdir = f'tiles/{tileid}/{night}'
else:
outdir = f'tiles/{group}/{tileid}'
log.warning(f'Non-standard tile group={group}; writing outputs to {outdir}/*')
return outdir
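# Examples of the returned relative paths (tile/night/expid values are hypothetical):
#   get_tile_redshift_relpath(1234, 'cumulative', night=20210510)  # 'tiles/cumulative/1234/20210510'
#   get_tile_redshift_relpath(1234, 'perexp', expid=88)            # 'tiles/perexp/1234/00000088'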
def get_tile_redshift_script_pathname(tileid,group,night=None,expid=None):
"""
Generate the pathname of the tile redshift batch script for spectra+coadd+redshifts for a tile
Args:
tileid (int): Tile ID
group (str): cumulative, pernight, perexp, or a custom name
night (int): Night
expid (int): Exposure ID
Returns:
(str): the pathname of the tile redshift batch script
"""
reduxdir = desispec.io.specprod_root()
outdir = get_tile_redshift_relpath(tileid,group,night=night,expid=expid)
scriptdir = f'{reduxdir}/run/scripts/{outdir}'
suffix = get_tile_redshift_script_suffix(tileid,group,night=night,expid=expid)
batchscript = f'coadd-redshifts-{suffix}.slurm'
return os.path.join(scriptdir, batchscript)
def get_tile_redshift_script_suffix(tileid,group,night=None,expid=None):
"""
Generate the suffix of the tile redshift batch script for spectra+coadd+redshifts for a tile
Args:
tileid (int): Tile ID
group (str): cumulative, pernight, perexp, or a custom name
night (int): Night
expid (int): Exposure ID
Returns:
suffix (str): the suffix of the batch script
"""
log = get_logger()
if group == 'cumulative':
suffix = f'{tileid}-thru{night}'
elif group == 'pernight':
suffix = f'{tileid}-{night}'
elif group == 'perexp':
suffix = f'{tileid}-exp{expid:08d}'
elif group == 'pernight-v0':
suffix = f'{tileid}-{night}'
else:
suffix = f'{tileid}-{group}'
log.warning(f'Non-standard tile group={group}; writing outputs to {suffix}.*')
return suffix
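# Examples of the returned suffixes (same hypothetical values as above):
#   get_tile_redshift_script_suffix(1234, 'cumulative', night=20210510)  # '1234-thru20210510'
#   get_tile_redshift_script_suffix(1234, 'perexp', expid=88)            # '1234-exp00000088'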
def batch_tile_redshifts(tileid, exptable, group, spectrographs=None,
submit=False, queue='realtime', reservation=None,
dependency=None, system_name=None, run_zmtl=False,
noafterburners=False):
"""
Generate batch script for spectra+coadd+redshifts for a tile
Args:
tileid (int): Tile ID
exptable (Table): has columns NIGHT EXPID to use; ignores other columns.
Doesn't need to be full pipeline exposures table (but could be)
group (str): cumulative, pernight, perexp, or a custom name
Options:
spectrographs (list of int): spectrographs to include
submit (bool): also submit batch script to queue
queue (str): batch queue name
reservation (str): batch reservation name
dependency (str): passed to sbatch --dependency upon submit
system_name (str): batch system name, e.g. cori-haswell, perlmutter-gpu
run_zmtl (bool): if True, also run make_zmtl_files
noafterburners (bool): if True, do not run QSO afterburners
Returns tuple (scriptpath, error):
scriptpath (str): full path to generated script
err (int): return code from submitting job (0 if submit=False)
    By default this generates the script but doesn't submit it
"""
log = get_logger()
if spectrographs is None:
spectrographs = (0,1,2,3,4,5,6,7,8,9)
if (group == 'perexp') and len(exptable)>1:
msg = f'group=perexp requires 1 exptable row, not {len(exptable)}'
log.error(msg)
raise ValueError(msg)
nights = np.unique(np.asarray(exptable['NIGHT']))
if (group in ['pernight', 'pernight-v0']) and len(nights)>1:
msg = f'group=pernight requires all exptable rows to be same night, not {nights}'
log.error(msg)
raise ValueError(msg)
tileids = np.unique(np.asarray(exptable['TILEID']))
if len(tileids)>1:
msg = f'batch_tile_redshifts requires all exptable rows to be same tileid, not {tileids}'
log.error(msg)
raise ValueError(msg)
elif len(tileids) == 1 and tileids[0] != tileid:
        msg = f"Specified tileid={tileid} didn't match tileid given in exptable, {tileids}"
log.error(msg)
raise ValueError(msg)
spectro_string = ' '.join([str(sp) for sp in spectrographs])
num_nodes = len(spectrographs)
frame_glob = list()
for night, expid in zip(exptable['NIGHT'], exptable['EXPID']):
frame_glob.append(f'exposures/{night}/{expid:08d}/cframe-[brz]$SPECTRO-{expid:08d}.fits')
#- Be explicit about naming. Night should be the most recent Night.
#- Expid only used for labeling perexp, for which there is only one row here anyway
night = np.max(exptable['NIGHT'])
expid = np.min(exptable['EXPID'])
frame_glob = ' '.join(frame_glob)
batchscript = get_tile_redshift_script_pathname(
tileid, group, night=night, expid=expid)
batchlog = batchscript.replace('.slurm', r'-%j.log')
scriptdir = os.path.split(batchscript)[0]
os.makedirs(scriptdir, exist_ok=True)
outdir = get_tile_redshift_relpath(tileid, group, night=night, expid=expid)
suffix = get_tile_redshift_script_suffix(
tileid, group, night=night, expid=expid)
jobname = f'redrock-{suffix}'
write_redshift_script(batchscript, outdir,
jobname=jobname,
num_nodes=num_nodes,
group=group,
spectro_string=spectro_string, suffix=suffix,
frame_glob=frame_glob,
queue=queue, system_name=system_name,
onetile=True, tileid=tileid, night=night, expid=expid,
run_zmtl=run_zmtl, noafterburners=noafterburners)
err = 0
if submit:
cmd = ['sbatch' ,]
if reservation:
cmd.extend(['--reservation', reservation])
if dependency:
cmd.extend(['--dependency', dependency])
# - sbatch requires the script to be last, after all options
cmd.append(batchscript)
err = subprocess.call(cmd)
basename = os.path.basename(batchscript)
if err == 0:
log.info(f'submitted {basename}')
else:
log.error(f'Error {err} submitting {basename}')
return batchscript, err
def write_redshift_script(batchscript, outdir,
jobname, num_nodes,
group, spectro_string, suffix,
frame_glob=None, expfile=None,
healpix=None,
extra_header=None,
queue='regular', system_name=None,
onetile=True, tileid=None, night=None, expid=None,
run_zmtl=False, noafterburners=False,
redrock_nodes=1, redrock_cores_per_rank=1,
):
"""
Write a batch script for running coadds, redshifts, and afterburners
Args:
batchscript (str): filepath to batch script to write
outdir (str): output directory to write data
jobname (str): slurm job name
num_nodes (int): number of nodes to allocate
group (str): used for tile redshifts, e.g. 'cumulative'
spectro_string (str): e.g. '0 1 2 3' spectrographs to run
suffix (str): filename suffix (e.g. TILEID-thruNIGHT)
Options:
frame_glob (str): glob for finding input cframes
expfile (str): filename with NIGHT EXPID SPECTRO
healpix (int): healpix number (to use with group=healpix)
extra_header (dict): extra key/value pairs to add to header
queue (str): queue name
system_name (str): e.g. cori-haswell, cori-knl, perlmutter-gpu
onetile (bool): coadd assuming input is for a single tile?
tileid (int): tileid to process; only needed for group='cumulative'
night (int): process through or on night YEARMMDD; for group='cumulative' and 'pernight'
expid (int): expid for group='perexp'
run_zmtl (bool): if True, also run zmtl
noafterburners (bool): if True, skip QSO afterburners
redrock_nodes (int): number of nodes for each redrock call
redrock_cores_per_rank (int): number of cores/rank to use for redrock
Note: some of these options are hacked to also be used by healpix_redshifts,
e.g. by providing spectro_string='sv3' instead of list of spectrographs.
Note: Use redrock_cores_per_rank > 1 to reserve extra memory per rank
for large input coadd files (e.g. sv3 healpix).
Note: must specify frame_glob for tile-based groups, and expfile for
group=healpix.
"""
log = get_logger()
if redrock_nodes > num_nodes:
msg = f'redrock_nodes ({redrock_nodes}) cannot be larger than job num_nodes ({num_nodes})'
log.error(msg)
raise ValueError(msg)
batch_config = batch.get_config(system_name)
batchlog = batchscript.replace('.slurm', r'-%j.log')
#- tileid and night are required for cumulative redshifts but not others
#- (frameglob captures the info for other cases)
if group in ('cumulative', 'pernight'):
err = False
if tileid is None:
log.error(f"group='{group}' requires tileid to be set")
err = True
if night is None:
log.error(f"group='{group}' requires night to be set")
err = True
if err:
raise ValueError(f"group='{group}' missing tileid and/or night")
if onetile:
onetileopt = '--onetile'
else:
onetileopt = ''
#- header keywords to record spectra grouping
headeropt = f'--header SPGRP={group}'
if group in ('cumulative', 'pernight'):
headeropt += f' SPGRPVAL={night} NIGHT={night}'
elif group == 'perexp':
headeropt += f' SPGRPVAL={expid} NIGHT={night} EXPID={expid}'
elif group == 'healpix':
headeropt += f' SPGRPVAL={healpix}'
else:
headeropt += f' SPGRPVAL=None'
if group != 'healpix':
headeropt += f' TILEID={tileid} SPECTRO=$SPECTRO PETAL=$SPECTRO'
if extra_header is not None:
for key, value in extra_header.items():
headeropt += f' {key}={value}'
#- system specific options, e.g. "--constraint=haswell"
batch_opts = list()
if 'batch_opts' in batch_config:
for opt in batch_config['batch_opts']:
batch_opts.append(f'#SBATCH {opt}')
batch_opts = '\n'.join(batch_opts)
runtime = 10 + int(10 * batch_config['timefactor'] * redrock_cores_per_rank)
#- some healpix have lots of targets; adhoc increase runtime
if group == 'healpix':
runtime += 15
runtime_hh = runtime // 60
runtime_mm = runtime % 60
cores_per_node = batch_config['cores_per_node']
threads_per_core = batch_config['threads_per_core']
threads_per_node = cores_per_node * threads_per_core
logdir = os.path.join(outdir, 'logs')
if system_name=='perlmutter-gpu':
account='desi_g'
else:
account='desi'
with open(batchscript, 'w') as fx:
fx.write(f"""#!/bin/bash
#SBATCH -N {num_nodes}
#SBATCH --account {account}
#SBATCH --qos {queue}
#SBATCH --job-name {jobname}
#SBATCH --output {batchlog}
#SBATCH --time={runtime_hh:02d}:{runtime_mm:02d}:00
#SBATCH --exclusive
{batch_opts}
echo --- Starting at $(date)
START_TIME=$SECONDS
pushd $DESI_SPECTRO_REDUX/$SPECPROD
mkdir -p {outdir}
mkdir -p {logdir}
echo
echo --- Generating files in {outdir}
echo""")
if frame_glob is not None:
fx.write(f"""
echo --- Grouping frames to spectra at $(date)
for SPECTRO in {spectro_string}; do
spectra={outdir}/spectra-$SPECTRO-{suffix}.fits
splog={logdir}/spectra-$SPECTRO-{suffix}.log
if [ -f $spectra ]; then
echo $(basename $spectra) already exists, skipping grouping
else
# Check if any input frames exist
CFRAMES=$(ls {frame_glob})
MISSING_CFRAMES=$?
NUM_CFRAMES=$(echo $CFRAMES | wc -w)
if [ $MISSING_CFRAMES -ne 0 ] && [ $NUM_CFRAMES -gt 0 ]; then
echo ERROR: some expected cframes missing for spectrograph $SPECTRO but proceeding anyway
fi
if [ $NUM_CFRAMES -gt 0 ]; then
echo Grouping $NUM_CFRAMES cframes into $(basename $spectra), see $splog
cmd="srun -N 1 -n 1 -c {threads_per_node} --cpu-bind=none desi_group_spectra --inframes $CFRAMES --outfile $spectra {headeropt}"
echo RUNNING $cmd &> $splog
$cmd &>> $splog &
sleep 0.5
else
echo ERROR: no input cframes for spectrograph $SPECTRO, skipping
fi
fi
done
echo Waiting for desi_group_spectra to finish at $(date)
wait
""")
elif expfile is not None and group == 'healpix':
fx.write(f"""
echo --- Grouping frames to spectra at $(date)
for SPECTRO in {spectro_string}; do
spectra={outdir}/spectra-$SPECTRO-{suffix}.fits
splog={logdir}/spectra-$SPECTRO-{suffix}.log
if [ -f $spectra ]; then
echo $(basename $spectra) already exists, skipping grouping
else
cmd="desi_group_spectra --expfile {expfile} --outfile $spectra --healpix {healpix} {headeropt}"
echo RUNNING $cmd &> $splog
$cmd &>> $splog
fi
done
""")
fx.write(f"""
echo
echo --- Coadding spectra at $(date)
for SPECTRO in {spectro_string}; do
spectra={outdir}/spectra-$SPECTRO-{suffix}.fits
coadd={outdir}/coadd-$SPECTRO-{suffix}.fits
colog={logdir}/coadd-$SPECTRO-{suffix}.log
if [ -f $coadd ]; then
echo $(basename $coadd) already exists, skipping coadd
elif [ -f $spectra ]; then
echo Coadding $(basename $spectra) into $(basename $coadd), see $colog
cmd="srun -N 1 -n 1 -c {threads_per_node} --cpu-bind=none desi_coadd_spectra {onetileopt} --nproc 16 -i $spectra -o $coadd"
echo RUNNING $cmd &> $colog
$cmd &>> $colog &
sleep 0.5
else
echo ERROR: missing $(basename $spectra), skipping coadd
fi
done
echo Waiting for desi_coadd_spectra to finish at $(date)
wait
""")
fx.write(f"""
echo
echo --- Running redrock at $(date)
echo Using {redrock_nodes} nodes per redrock call
echo Using {redrock_cores_per_rank} cores per rank for redrock
for SPECTRO in {spectro_string}; do
coadd={outdir}/coadd-$SPECTRO-{suffix}.fits
redrock={outdir}/redrock-$SPECTRO-{suffix}.fits
rrdetails={outdir}/rrdetails-$SPECTRO-{suffix}.h5
rrlog={logdir}/redrock-$SPECTRO-{suffix}.log
if [ -f $redrock ]; then
echo $(basename $redrock) already exists, skipping redshifts
elif [ -f $coadd ]; then
echo Running redrock on $(basename $coadd), see $rrlog
cmd="srun -N {redrock_nodes} -n {cores_per_node*redrock_nodes//redrock_cores_per_rank} -c {threads_per_core*redrock_cores_per_rank} --cpu-bind=cores rrdesi_mpi -i $coadd -o $redrock -d $rrdetails"
echo RUNNING $cmd &> $rrlog
$cmd &>> $rrlog &
sleep 0.5
else
echo ERROR: missing $(basename $coadd), skipping redshifts
fi
done
echo Waiting for redrock to finish at $(date)
wait
""")
if group in ('pernight', 'cumulative'):
fx.write(f"""
echo
tileqa={outdir}/tile-qa-{suffix}.fits
if [ -f $tileqa ]; then
echo --- $(basename $tileqa) already exists, skipping desi_tile_qa
else
echo --- Running desi_tile_qa
tile_qa_log={logdir}/tile-qa-{tileid}-thru{night}.log
desi_tile_qa -g {group} -n {night} -t {tileid} &> $tile_qa_log
fi
""")
if run_zmtl:
fx.write(f"""
# These run fast; use a single node for all 10 petals without srun overhead
echo
echo --- Running make_zmtl_files at $(date)
for SPECTRO in {spectro_string}; do
coadd={outdir}/coadd-$SPECTRO-{suffix}.fits
redrock={outdir}/redrock-$SPECTRO-{suffix}.fits
zmtl={outdir}/zmtl-$SPECTRO-{suffix}.fits
zmtllog={logdir}/zmtl-$SPECTRO-{suffix}.log
if [ -f $zmtl ]; then
echo $(basename $zmtl) already exists, skipping make_zmtl_files
elif [[ -f $coadd && -f $redrock ]]; then
echo Running make_zmtl_files on $(basename $redrock), see $zmtllog
cmd="make_zmtl_files -in $redrock -out $zmtl"
echo RUNNING $cmd &> $zmtllog
$cmd &>> $zmtllog &
else
echo ERROR: missing $(basename $redrock) or $(basename $coadd), skipping zmtl
fi
done
echo Waiting for zmtl to finish at $(date)
wait
""")
if not noafterburners:
fx.write(f"""
echo
echo --- Running QSO afterburners at $(date)
for SPECTRO in {spectro_string}; do
coadd={outdir}/coadd-$SPECTRO-{suffix}.fits
redrock={outdir}/redrock-$SPECTRO-{suffix}.fits
qsomgii={outdir}/qso_mgii-$SPECTRO-{suffix}.fits
qsoqn={outdir}/qso_qn-$SPECTRO-{suffix}.fits
qsomgiilog={logdir}/qso_mgii-$SPECTRO-{suffix}.log
qsoqnlog={logdir}/qso_qn-$SPECTRO-{suffix}.log
# QSO MgII afterburner
if [ -f $qsomgii ]; then
echo $(basename $qsomgii) already exists, skipping QSO MgII afterburner
elif [ -f $redrock ]; then
echo Running QSO MgII afterburner, see $qsomgiilog
cmd="srun -N 1 -n 1 -c {threads_per_node} --cpu-bind=none desi_qso_mgii_afterburner --coadd $coadd --redrock $redrock --output $qsomgii --target_selection all --save_target all"
echo RUNNING $cmd &> $qsomgiilog
$cmd &>> $qsomgiilog &
sleep 0.5
else
echo ERROR: missing $(basename $redrock), skipping QSO MgII afterburner
fi
# QSO QuasarNet (QN) afterburner
if [ -f $qsoqn ]; then
echo $(basename $qsoqn) already exists, skipping QSO QuasarNet afterburner
elif [ -f $redrock ]; then
echo Running QSO QuasarNet afterburner, see $qsoqnlog
cmd="srun -N 1 -n 1 -c {threads_per_node} --cpu-bind=none desi_qso_qn_afterburner --coadd $coadd --redrock $redrock --output $qsoqn --target_selection all --save_target all"
echo RUNNING $cmd &> $qsoqnlog
$cmd &>> $qsoqnlog &
sleep 0.5
else
echo ERROR: missing $(basename $redrock), skipping QSO QN afterburner
fi
done
echo Waiting for QSO afterburners to finish at $(date)
wait
""")
fx.write(f"""
echo
echo --- Files in {outdir}:
for prefix in spectra coadd redrock zmtl qso_qn qso_mgii tile-qa; do
echo " " $(ls {outdir}/$prefix*.fits |& grep -v 'cannot access' | wc -l) $prefix
done
popd &> /dev/null
END_TIME=$SECONDS
DURATION_MINUTES=$(( ($END_TIME - $START_TIME)/60 ))
DURATION_SECONDS=$(( ($END_TIME - $START_TIME)%60 ))
echo
echo --- Done at $(date) in ${{DURATION_MINUTES}}m${{DURATION_SECONDS}}s
""")
log.info(f'Wrote {batchscript}')
def _read_minimal_exptables(nights=None):
"""
Read exposure tables while handling evolving formats
Args:
nights (list of int): nights to include (default all nights found)
Returns an exptable with just the columns TILEID, NIGHT, EXPID, filtered to
science exposures with LASTSTEP='all' and TILEID>=0
Note: the returned table is *not* the full pipeline exposures table because
the format of that changed during SV1 and thus can't be stacked without
trimming down the columns. This trims to just the minimal columns
needed by desi_tile_redshifts.
"""
log = get_logger()
if nights is None:
reduxdir = desispec.io.specprod_root()
etab_files = glob.glob(f'{reduxdir}/exposure_tables/202???/exposure_table_202?????.csv')
else:
etab_files = list()
for night in nights:
etab_file = get_exposure_table_pathname(night)
if os.path.exists(etab_file):
etab_files.append(etab_file)
elif night >= 20201201:
log.error(f"Exposure table missing for night {night}")
else:
# - these are expected for the daily run, ok
log.debug(f"Exposure table missing for night {night}")
etab_files = sorted(etab_files)
exptables = list()
for etab_file in etab_files:
t = Table.read(etab_file)
keep = (t['OBSTYPE'] == 'science') & (t['TILEID'] >= 0)
if 'LASTSTEP' in t.colnames:
keep &= (t['LASTSTEP'] == 'all')
t = t[keep]
exptables.append(t['TILEID', 'NIGHT', 'EXPID'])
return vstack(exptables)
def generate_tile_redshift_scripts(group, night=None, tileid=None, expid=None, explist=None,
spectrographs=None,
run_zmtl=False, noafterburners=False,
batch_queue='realtime', batch_reservation=None,
batch_dependency=None, system_name=None, nosubmit=False):
"""
Creates a Slurm script to run redshifts per tile and, by default, submits it to the batch queue.
If nosubmit is True, the script is created but not submitted.
Args:
group (str): Type of coadd redshifts to run. Options are cumulative, pernight, perexp, or a custom name.
night (int, or list or np.array of int's): YEARMMDD nights.
tileid (int): Tile ID.
expid (int, or list or np.array of int's): Exposure IDs.
explist (str): File with columns TILE NIGHT EXPID to use
spectrographs (str or list of int): spectrographs to include
run_zmtl (bool): If True, also run make_zmtl_files
noafterburners (bool): If True, do not run QSO afterburners
batch_queue (str): Batch queue name. Default is 'realtime'.
batch_reservation (str): Batch reservation name.
batch_dependency (str): Job dependencies passed to sbatch --dependency .
system_name (str): Batch system name, e.g. cori-haswell, cori-knl, perlmutter-gpu.
nosubmit (bool): Generate scripts but don't submit batch jobs. Default is False.
Returns:
batch_scripts (list of str): paths of the scripts created whose batcherr was zero
(i.e. no submission error).
failed_jobs (list of str): paths of the scripts created whose batcherr was non-zero.
"""
log = get_logger()
# - If --tileid, --night, and --expid are all given, create exptable
if ((tileid is not None) and (night is not None) and
(len(night) == 1) and (expid is not None)):
log.info('Creating exposure table from --tileid --night --expid options')
exptable = Table()
exptable['EXPID'] = expid
exptable['NIGHT'] = night[0]
exptable['TILEID'] = tileid
if explist is not None:
log.warning('Ignoring --explist, using --tileid --night --expid')
# - otherwise load exposure tables for those nights
elif explist is None:
if night is not None:
log.info(f'Loading production exposure tables for nights {night}')
else:
log.info(f'Loading production exposure tables for all nights')
exptable = _read_minimal_exptables(night)
else:
log.info(f'Loading exposure list from {explist}')
if explist.endswith('.fits'):
exptable = Table.read(explist, format='fits')
elif explist.endswith('.csv'):
exptable = Table.read(explist, format='ascii.csv')
elif explist.endswith('.ecsv'):
exptable = Table.read(explist, format='ascii.ecsv')
else:
exptable = Table.read(explist, format='ascii')
if night is not None:
keep = np.in1d(exptable['NIGHT'], night)
exptable = exptable[keep]
# - Filter exposure tables by exposure IDs or by tileid
# - Note: If exptable was created from --expid --night --tileid these should
# - have no effect, but are left in for code flow simplicity
if expid is not None:
keep = np.in1d(exptable['EXPID'], expid)
exptable = exptable[keep]
#expids = np.array(exptable['EXPID'])
tileids = np.unique(np.array(exptable['TILEID']))
# - if provided, tileid should be redundant with the tiles in those exps
if tileid is not None:
if not np.all(exptable['TILEID'] == tileid):
log.critical(f'Exposure TILEIDs={tileids} != --tileid={tileid}')
sys.exit(1)
elif tileid is not None:
keep = (exptable['TILEID'] == tileid)
exptable = exptable[keep]
#expids = np.array(exptable['EXPID'])
tileids = np.array([tileid, ])
else:
tileids = np.unique(np.array(exptable['TILEID']))
# - anything left?
if len(exptable) == 0:
log.critical(f'No exposures left after filtering by tileid/night/expid')
sys.exit(1)
if spectrographs is not None:
if isinstance(spectrographs, str):
spectrographs = parse_int_args(spectrographs, include_end=True)
else:
spectrographs = list(range(10))
# - If cumulative, find all prior exposures that also observed these tiles
# - NOTE: depending upon options, this might re-read all the exptables again
# - NOTE: this may not scale well several years into the survey
if group == 'cumulative':
log.info(f'{len(tileids)} tiles; searching for exposures on prior nights')
allexp = _read_minimal_exptables()
keep = np.in1d(allexp['TILEID'], tileids)
exptable = allexp[keep]
## Ensure we only include data for nights up to and including specified nights
if (night is not None):
lastnight = int(np.max(night))
exptable = exptable[exptable['NIGHT'] <= lastnight]
#expids = np.array(exptable['EXPID'])
tileids = np.unique(np.array(exptable['TILEID']))
# - Generate the scripts and optionally submit them
failed_jobs, batch_scripts = list(), list()
for tileid in tileids:
tilerows = (exptable['TILEID'] == tileid)
nights = np.unique(np.array(exptable['NIGHT'][tilerows]))
expids = np.unique(np.array(exptable['EXPID'][tilerows]))
log.info(f'Tile {tileid} nights={nights} expids={expids}')
submit = (not nosubmit)
opts = dict(
spectrographs=spectrographs,
submit=submit,
run_zmtl=run_zmtl,
noafterburners=noafterburners,
queue=batch_queue,
reservation=batch_reservation,
dependency=batch_dependency,
system_name=system_name,
)
if group == 'perexp':
for i in range(len(exptable[tilerows])):
batchscript, batcherr = batch_tile_redshifts(
tileid, exptable[tilerows][i:i + 1], group, **opts)
elif group in ['pernight', 'pernight-v0']:
for night in nights:
thisnight = exptable['NIGHT'] == night
batchscript, batcherr = batch_tile_redshifts(
tileid, exptable[tilerows & thisnight], group, **opts)
else:
batchscript, batcherr = batch_tile_redshifts(
tileid, exptable[tilerows], group, **opts)
if batcherr != 0:
failed_jobs.append(batchscript)
else:
batch_scripts.append(batchscript)
#- Report num_error but don't sys.exit here; pipeline workflows need the return value,
#- so any exit-on-error belongs at the script level
num_error = len(failed_jobs)
if num_error > 0:
tmp = [os.path.basename(filename) for filename in failed_jobs]
log.error(f'problem submitting {num_error} scripts: {tmp}')
#- Return batch_scripts for use in pipeline and failed_jobs for explicit exit code in script
return batch_scripts, failed_jobs
| |
from pyCovertAudio_lib import *
from BaseDemodulator import BaseDemodulator
from Debug import Debug
from SignalFunctions import SignalFunctions
import time
import math
class FHSSDemodulator(BaseDemodulator):
def __init__(
self, bitsPerSymbol, sampleRate, samplesPerSymbol,
symbolExpansionFactor, separationIntervals, configuration
):
configuration["carrierFrequency"] = 0.0
BaseDemodulator.__init__(
self,
bitsPerSymbol,
sampleRate,
samplesPerSymbol,
symbolExpansionFactor,
separationIntervals,
configuration
)
(
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth
) = \
python_BFSK_determine_frequencies(
self.samplesPerSymbol,
self.sampleRate,
0,
self.separationIntervals
)
try:
self.minimumFrequency = configuration["minimumFrequency"]
self.maximumFrequency = configuration["maximumFrequency"]
self.symbolFrequencyBandwidth = configuration[
'symbolFrequencyBandwidth']
self.bandwidthDivisor = configuration["bandwidthDivisor"]
self.bandwidth /= self.bandwidthDivisor
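# The available band [minimumFrequency, maximumFrequency] is divided into
# sub-channels of (roughly) self.bandwidth each; every carrier frequency hosts
# one BFSK symbol pair (see initializeFilter).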
self.carrierFrequencies = \
SignalFunctions.getCarrierFrequencies(
self.minimumFrequency,
self.maximumFrequency,
self.bandwidth
)
self.numberOfSubChannels = len(self.carrierFrequencies)
except KeyError as e:
print "ERROR: Could not find key %s" % (str(e))
self.initializeSignals()
self.initializeFilters()
def initializeSignals(self):
self.interpolationGap = \
int(
2.0 *
(
(float(self.sampleRate) / 2.0) -
(self.carrierFrequencies[-1] + (self.bandwidth / 2.0))
)
)
print "Gap: %d" % (self.interpolationGap)
self.decimationFactor = \
int(
math.floor(
(2.0 * self.samplesPerSymbol) /
float(self.decimatedSamplesPerSymbol)
)
)
self.decimatedSampleRate = \
int(
math.ceil(
(2.0 * self.sampleRate) /
float(self.decimationFactor)
)
)
def initializeFilters(self):
self.filters = []
for carrierFrequency in self.carrierFrequencies:
(filter0, filter1) = self.initializeFilter(carrierFrequency)
self.filters.append(filter0)
self.filters.append(filter1)
def initializeFilter(self, carrierFrequency):
passbandSymbol0Frequency = carrierFrequency + self.symbol0Frequency
passbandSymbol1Frequency = carrierFrequency + self.symbol1Frequency
frequencySeparation = \
passbandSymbol1Frequency - passbandSymbol0Frequency
filter0 = \
python_initialize_kaiser_filter(
passbandSymbol0Frequency - frequencySeparation,
passbandSymbol0Frequency -
(float(self.symbolFrequencyBandwidth) / 2.0),
passbandSymbol0Frequency +
(float(self.symbolFrequencyBandwidth) / 2.0),
passbandSymbol0Frequency + frequencySeparation,
0.1,
80,
self.sampleRate
)
filter1 = \
python_initialize_kaiser_filter(
passbandSymbol1Frequency - frequencySeparation,
passbandSymbol1Frequency -
(float(self.symbolFrequencyBandwidth) / 2.0),
passbandSymbol1Frequency +
(float(self.symbolFrequencyBandwidth) / 2.0),
passbandSymbol1Frequency + frequencySeparation,
0.1,
80,
self.sampleRate
)
filterDelay0 = python_filter_get_group_delay(filter0)
filterDelay1 = python_filter_get_group_delay(filter1)
print "Symbol filter delays:\t1=%d\t2=%d" \
% (filterDelay0, filterDelay1)
return(filter0, filter1)
def combineSignal(self, signal):
filtered = []
interpolated = []
squared = []
decimated = []
normalized = []
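# Processing chain: band-pass filter the signal around each symbol frequency,
# then interpolate, square, decimate, and normalize; the even-indexed channels
# (symbol 0 filters) and odd-indexed channels (symbol 1 filters) are finally
# combined by taking the per-sample maximum magnitude.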
filterStartTime = time.time()
for signalFilter in self.filters:
filterDelay = python_filter_get_group_delay(signalFilter)
filteredSignal = python_filter_signal(signalFilter, signal)
filteredSignal = filteredSignal[filterDelay:]
filtered.append(filteredSignal)
filterTime = time.time() - filterStartTime
print "Filter time: %.04f" % (filterTime)
for i in range(len(filtered)):
Debug.instance.debugSignal(
"filteredSignal%d.WAV" % (i),
filtered[i],
self.sampleRate
)
interpolatorStartTime = time.time()
for filteredSignal in filtered:
interpolatedSignal = \
SignalFunctions.interpolateSignal(
filteredSignal,
self.sampleRate,
self.interpolationGap
)
interpolated.append(interpolatedSignal)
interpolatorTime = time.time() - interpolatorStartTime
print "Interpolation time: %.04f" % (interpolatorTime)
for i in range(len(interpolated)):
Debug.instance.debugSignal(
"interpolatedSignal%d.WAV" % (i),
interpolated[i],
2 * self.sampleRate
)
squareStartTime = time.time()
for interpolatedSignal in interpolated:
squaredSignal = \
SignalFunctions.squareSignal(
interpolatedSignal,
2 * self.sampleRate,
self.symbolFrequencyBandwidth,
self.interpolationGap
)
squared.append(squaredSignal)
squareTime = time.time() - squareStartTime
print "Square time: %.04f" % (squareTime)
for i in range(len(squared)):
Debug.instance.debugSignal(
"squaredSignal%d.WAV" % (i),
squared[i],
2 * self.sampleRate
)
decimateStartTime = time.time()
for squaredSignal in squared:
decimatedSignal = \
SignalFunctions.decimate(squaredSignal, self.decimationFactor)
decimated.append(decimatedSignal)
decimateTime = time.time() - decimateStartTime
print "Decimate time: %.04f" % (decimateTime)
for i in range(len(decimated)):
Debug.instance.debugSignal(
"signal%d.WAV" % (i),
decimated[i],
self.decimatedSampleRate
)
for decimatedSignal in decimated:
signal = SignalFunctions.normalizeSignal(decimatedSignal)
normalized.append(signal)
combinedSignal0 = []
combinedSignal1 = []
for i in range(len(normalized[0])):
maxValue = 0.0
for j in range(0, len(normalized), 2):
if(abs(normalized[j][i]) >= maxValue):
maxValue = abs(normalized[j][i])
combinedSignal0.append(maxValue)
maxValue = 0.0
for j in range(1, len(normalized), 2):
if(abs(normalized[j][i]) >= maxValue):
maxValue = abs(normalized[j][i])
combinedSignal1.append(maxValue)
Debug.instance.debugSignal(
"combinedSignal0.WAV",
combinedSignal0,
self.decimatedSampleRate
)
Debug.instance.debugSignal(
"combinedSignal1.WAV",
combinedSignal1,
self.decimatedSampleRate
)
signal0 = SignalFunctions.normalizeSignal(combinedSignal0)
signal1 = SignalFunctions.normalizeSignal(combinedSignal1)
signal1 = [-1.0 * x for x in signal1]
signal0Length = len(signal0)
signal1Length = len(signal1)
length = min(signal0Length, signal1Length)
result = [0.0 for i in range(length)]
for i in range(length):
if(abs(signal0[i]) >= abs(signal1[i])):
result[i] = signal0[i]
else:
result[i] = signal1[i]
Debug.instance.debugSignal(
'combined.WAV',
result,
self.decimatedSampleRate
)
return(result)
def demodulate(self, signal):
combined = self.combineSignal(signal)
symbols = self.getSymbols(combined)
return(symbols)
def toString(self):
return (
"Demodulator:\n\tAlgorithm:\t\t\tBFSK\n\tSymbol 0 frequency"
":\t\t%.02f\n\tSymbol 1 frequency:\t\t%.02f\n\tMin frequency"
" separation:\t%.02f\n\tBandwidth:\t\t\t%.02f\n\tFrequency"
" bandwidth:\t\t%d\n%s"
% (
self.symbol0Frequency,
self.symbol1Frequency,
self.deltaFrequency,
self.bandwidth,
self.symbolFrequencyBandwidth,
BaseDemodulator.toString(self)
)
)
def getSymbols(self, signal):
symbols = []
averaged = \
SignalFunctions.movingAverage(
signal,
self.symbolExpansionFactor * self.decimatedSamplesPerSymbol
)
Debug.instance.debugSignal(
'averaged.WAV',
averaged,
self.decimatedSampleRate
)
averaged = \
SignalFunctions.interpolateSignal(
averaged,
self.decimatedSampleRate,
1
)
Debug.instance.debugSignal(
'averagedInterpolated.WAV',
averaged,
2 * self.decimatedSampleRate
)
averaged = SignalFunctions.normalizeSignal(averaged)
Debug.instance.debugSequence('averaged.dat', averaged)
samplePoints = \
self.runGardnerAlgorithm(
averaged,
2 * self.symbolExpansionFactor *
self.decimatedSamplesPerSymbol,
2 * self.decimatedSampleRate
)
Debug.instance.debugSequence('samplePoints.dat', samplePoints)
for point in samplePoints:
symbol = 0 if(averaged[point] >= 0.0) else 1
symbols.append(symbol)
return(symbols)
def runGardnerAlgorithm(self, signal, samplesPerSymbol, sampleRate):
n = 1
offset = 0
samplePoints = []
print "Samples per symbol: %d\tSample rate: %d." \
% (samplesPerSymbol, sampleRate)
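# Gardner timing-error detector: the error term e = (x[n] - x[n-1]) * x[mid]
# nudges the sampling offset by one sample whenever consecutive symbol samples
# straddle zero and |e| exceeds a small threshold.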
while(((n * samplesPerSymbol) + offset) < len(signal)):
nextPoint = n * samplesPerSymbol + offset
previousPoint = (n - 1) * samplesPerSymbol + offset
midPoint = \
int(nextPoint - int(math.floor(samplesPerSymbol / 2.0)))
samplePoints.append(nextPoint)
e = \
(signal[nextPoint] - signal[previousPoint]) * signal[midPoint]
if((signal[nextPoint] * signal[previousPoint]) < 0):
if(e < -0.0001):
offset += 1
elif(e > 0.0001):
offset -= 1
n += 1
return(samplePoints)
| |
""" Various kinds of lyaout widgets.
"""
from __future__ import absolute_import
from functools import wraps
from six import string_types
import logging
import copy
from ...properties import abstract
from ...properties import Int, Instance, List, String, Dict, Either
from ...util.functions import cached_property, arg_filter
from ...validation.warnings import EMPTY_LAYOUT
from ... import validation
from ..component import Component
from .widget import Widget
logger = logging.getLogger(__name__)
@abstract
class Layout(Widget):
""" An abstract base class for layout widgets. ``Layout`` is not
generally useful to instantiate on its own.
"""
width = Int(help="""
An optional width for the widget (in pixels).
""")
height = Int(help="""
An optional height for the widget (in pixels).
""")
@abstract
class BaseBox(Layout):
""" Abstract base class for HBox and VBox. Do not use directly.
"""
def __init__(self, *args, **kwargs):
if len(args) > 0 and "children" in kwargs:
raise ValueError("'children' keyword cannot be used with positional arguments")
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not isinstance(args[0], Component)):
# Note: check that not Component, in case Widget/Layout ever gets __iter__
kwargs["children"] = list(args[0])
elif len(args) > 0:
kwargs["children"] = list(args)
super(BaseBox, self).__init__(**kwargs)
@validation.warning(EMPTY_LAYOUT)
def _check_empty_layout(self):
from itertools import chain
if not list(chain(self.children)):
return str(self)
children = List(Instance(Component), help="""
The list of children, which can be other widgets (including layouts)
and plots.
""")
class HBox(BaseBox):
""" Lay out child widgets in a single horizontal row.
Children can be specified as positional arguments, as a single argument
that is a sequence, or using the ``children`` keyword argument.
"""
class VBox(BaseBox):
""" Lay out child widgets in a single vertical row.
Children can be specified as positional arguments, as a single argument
that is a sequence, or using the ``children`` keyword argument.
"""
# parent class only, you need to set the fields you want
class VBoxForm(VBox):
"""
Basically, a VBox, where all components (generally form stuff)
is wrapped in a <form> tag - important for bootstrap css
"""
class SimpleApp(Widget):
create_registry = {}
update_registry = {}
layout_registry = {}
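# create_registry maps an app name to the function that builds its initial objects,
# update_registry maps an app name to a list of (selectors, callback) pairs wired up
# in setup_events, and layout_registry optionally maps an app name to a custom
# layout-building function (otherwise default_layout() is used).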
name = String()
objects = Dict(String, Either(String, Instance(Component)))
widget_list = List(String, help="list of widgets, for ordering")
layout = Instance(Component)
@classmethod
def create(cls, name, widgets):
objects = {}
widget_list = []
for w in widgets:
objects[w.name] = w
widget_list.append(w.name)
obj = cls(name=name, objects=objects, widget_list=widget_list)
obj.set_output()
return obj
@cached_property
def widget_dict(self):
result = {}
for widget_name in self.widget_list:
result[widget_name] = self.objects[widget_name]
return result
def set_debounce(self):
self._debounce_called = {}
def clear_debounce(self):
delattr(self, "_debounce_called")
def process_user_result(self, result):
if isinstance(result, Component):
result = {'output' : result}
if isinstance(result, dict):
# hack - so we can detect a change
# kind of ok because it's a shallow copy
dummy = copy.copy(self.objects)
dummy.update(result)
self.objects = dummy
def callback(self, func):
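# Wraps a user function so that, on a property change, it runs at most once per
# debounce window (guarded by _debounce_called), is called with the current widget
# values from args_for_func, and has any returned components merged back into
# self.objects.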
from ...plotting import curdoc
@wraps(func)
def signature_change_call_once(obj, attrname, old, new):
debounce_called = getattr(self, "_debounce_called", None)
if debounce_called is not None and func.__name__ in debounce_called:
return
args = self.args_for_func(func)
logger.debug("calling %s", func.__name__)
result = func(**args)
self.process_user_result(result)
curdoc()._add_all()
if debounce_called is not None:
debounce_called[func.__name__] = True
return result
return signature_change_call_once
def setup_events(self):
## hack - we want each update callback to execute at most once per change event,
## and only if some properties in the graph have changed; set_debounce() (called
## at the end of setup_events) installs the _debounce_called dict that each
## wrapped callback marks once it has run
if not self.name:
return
to_delete = []
for k in self.__dict__.keys():
if k.startswith('_func'):
to_delete.append(k)
for k in to_delete:
self.__dict__.pop(k)
counter = 0
if not self.update_registry.get(self.name):
name = '_func%d' % counter
func = self.create_registry[self.name]
setattr(self, name, self.callback(func))
for widget_name in self.widget_list:
obj = self.objects.get(widget_name)
if obj:
for attr in obj.class_properties():
obj.on_change(attr, self, name)
return
for selectors, func in self.update_registry[self.name]:
#hack because we lookup callbacks by func name
name = '_func%d' % counter
counter += 1
setattr(self, name, self.callback(func))
for selector in selectors:
if isinstance(selector, string_types):
self.widget_dict[selector].on_change('value', self, name)
continue
elif isinstance(selector, tuple):
selector, attrs = selector
else:
attrs = None
for obj in self.select(selector):
if obj == self:
continue
if attrs:
toiter = attrs
else:
toiter = obj.class_properties()
for attr in toiter:
obj.on_change(attr, self, name)
self.set_debounce()
def args_for_func(self, func):
args = {}
for k,v in self.widget_dict.items():
if hasattr(v, 'value'):
args[k] = v.value
args['app'] = self
args = arg_filter(func, args)
return args
def set_output(self):
func = self.create_registry[self.name]
args = self.args_for_func(func)
result = func(**args)
self.process_user_result(result)
func = self.layout_registry.get(self.name)
if func:
self.layout = func(**self.args_for_func(func))
else:
self.layout = self.default_layout()
def default_layout(self):
widgets = [self.objects[x] for x in self.widget_list]
widgets = VBoxForm(children=widgets)
layout = AppHBox(children=[widgets, "output"], app=self)
return layout
class AppLayout(Layout):
app = Instance(SimpleApp)
class AppVBox(VBox, AppLayout):
"""VBox, except children can be other plot objects, as well as
strings (which are then evaluated in an app namespace for
de-referencing)
"""
children = List(Either(Instance(Component), String), help="""
The list of children, which can be other widgets (including layouts)
and plots - or strings. If strings, there must be a corresponding app
which contains the widget/plot matching that string
""")
class AppHBox(HBox, AppLayout):
"""VBox, except children can be other plot objects, as well as
strings (which are then evaluated in an app namespace for
de-referencing
"""
children = List(Either(Instance(Component), String), help="""
The list of children, which can be other widgets (including layouts)
and plots - or strings. If strings, there must be a corresponding app
which contains the widget/plot matching that string
""")
class AppVBoxForm(VBox, AppLayout):
"""VBox, except children can be other plot objects, as well as
strings (which are then evaluated in an app namespace for
de-referencing)
"""
children = List(Either(Instance(Component), String), help="""
The list of children, which can be other widgets (including layouts)
and plots - or strings. If strings, there must be a corresponding app
which contains the widget/plot matching that string
""")
| |
#! /usr/bin/python
# Test case to test multi-port functionality
# The configuration file for this test case specifies 2 different cores, each with a different
# data source. Three search terms are tested, each expected to be returned by one and only one
# of the cores. The usual syntax of the queriesAndResults.txt file has been extended to the
# following format:
# <search-term>||<core1 ID result set>@<core2 ID result set>@<core3 ID result set>
# where each ID result set is a space separated list of record IDs expected from the server.
# Specifically:
#
# Global ports:
# /info -> 8088
# /[other entrypoints] -> 8087
#
# Core 1: Movies, using global ports
# /info -> 8088
# /[other entrypoints] -> 8087
#
# Core 2: StackOverflow data
# /save -> 9087
# /export -> 9087
# /resetLogger -> 9087
# /docs -> 9087
# /update -> 9087
#
# In the test case, we send HTTP requests to those core-ports. Based on the configuration, some of
# the requests should succeed, and some should fail.
#
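# For example (hypothetical record IDs), a line such as
#   trust||id1 id2@id3@id4
# means the query "trust" should return records id1 and id2 from the default
# (unnamed) core, id3 from /core1, and id4 from /core2.
#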
import sys, urllib2, json, time, subprocess, os, commands, signal, re
sys.path.insert(0, 'srch2lib')
import test_lib
port = '8087' # core1
core1InfoPort = '8088' # core1 - /info
core2ControlPort = '9087' # core2 - all the control messages
#Function of checking the results
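# Compares the record IDs in responseJson against the expected resultValue list;
# returns 0 if they match (test pass) and 1 otherwise.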
def checkResult(query, responseJson,resultValue):
# for key, value in responseJson:
# print key, value
isPass=1
if len(responseJson) == len(resultValue):
for i in range(0, len(resultValue)):
#print response_json['results'][i]['record']['id']
if (resultValue.count(responseJson[i]['record']['id']) != 1):
isPass=0
print query+' test failed'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
for i in range(0, len(responseJson)):
print str(responseJson[i]['record']['id']) + '||' + resultValue[i]
break
else:
isPass=0
print query+' test failed - differing response lengths'
print 'query results||given results'
print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
maxLen = max(len(responseJson),len(resultValue))
for i in range(0, maxLen):
if i >= len(resultValue):
print str(responseJson[i]['record']['id'])+'||'
elif i >= len(responseJson):
print ' '+'||'+resultValue[i]
else:
print responseJson[i]['record']['id']+'||'+resultValue[i]
if isPass == 1:
print query+' test pass'
return 0
return 1
#prepare the query based on the valid syntax
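# e.g. (illustrative) prepareQuery(['star', 'trek'], True) returns 'q=star~%20AND%20trek~'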
def prepareQuery(queryKeywords, fuzzy):
query = ''
################# prepare main query part
query = query + 'q='
# local parameters
# query = query + '%7BdefaultPrefixComplete=COMPLETE%7D'
# keywords section
for i in range(0, len(queryKeywords)):
if fuzzy:
keyword = queryKeywords[i] + '~'
else:
keyword = queryKeywords[i]
if i == (len(queryKeywords)-1):
query=query+keyword # last keyword prefix
else:
query=query+keyword+'%20AND%20'
# print 'Query : ' + query
##################################
return query
def testMultipleCores(queriesAndResultsPath, binary_path):
if test_lib.confirmPortAvailable(port) == False:
print 'Port ' + str(port) + ' already in use - aborting'
return -1
#Start the engine server
args = [ binary_path, '--config-file=./multiport/conf-multiport.xml' ]
print 'starting engine: ' + args[0] + ' ' + args[1]
serverHandle = test_lib.startServer(args)
test_lib.pingServer(port)
failCount = 0
#######################################
# Basic multi-core functional testing #
#######################################
print "Test suite #1 - basic multi-core functionality"
f_in = open(queriesAndResultsPath, 'r')
for line in f_in:
#get the query keyword and results
value=line.split('||')
queryValue=value[0].split()
allResults=value[1].split('@')
coreNum=0
for coreResult in allResults:
resultValue=coreResult.split()
#construct the query
if coreNum == 0:
# test default core (unnamed core) on 0th iteration
query='http://localhost:' + port + '/search?'
else:
query='http://localhost:' + port + '/core' + str(coreNum) + '/search?'
query = query + prepareQuery(queryValue, False)
#do the query
response = urllib2.urlopen(query).read()
#print query + ' Got ==> ' + response
response_json = json.loads(response)
#check the result
failCount += checkResult(query, response_json['results'], resultValue)
coreNum += 1
f_in.close()
print "\nTest suite #2: Port security"
# Test if /info is indeed moved to another port
query='http://localhost:' + core1InfoPort + '/info'
#do the query
#print query
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
if len(response_json) > 0:
if int(response_json['engine_status']['docs_in_index']) != 244:
failCount += 1
print "Info request did not return expected document count: Got " + str(response_json['engine_status']['docs_in_index']) + " but expected 244."
else:
print query + ' test pass'
else:
failCount += 1
print "Null response to info request"
# Test if /info is no longer on standard port (negative test)
query='http://localhost:' + port + '/info'
#do the query
#print query
try:
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# Test if /search is not allowed in the /info port
query='http://localhost:' + core1InfoPort + '/search?q=foo'
#do the query
#print query
try:
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# Same tests but with core1 explicitly in the path
# Test if /core1/info is indeed moved to another port
query='http://localhost:' + core1InfoPort + '/core1/info'
#do the query
#print query
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
if len(response_json) > 0:
if int(response_json['engine_status']['docs_in_index']) != 244:
failCount += 1
print "Info request did not return expected document count: Got " + str(response_json['engine_status']['docs_in_index']) + " but expected 244."
else:
print query + ' test pass'
else:
failCount += 1
print "Null response to info request"
# Test if /core1/info is no longer on standard port (negative test)
query='http://localhost:' + port + '/core1/info'
#do the query
#print query
try:
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# Test if /search is not allowed in the /core1/info port
query='http://localhost:' + core1InfoPort + '/core1/search?q=foo'
#do the query
#print query
try:
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# Test if /core2/info is not allowed in the /core1/info port
query='http://localhost:' + core1InfoPort + '/core2/info'
#do the query
#print query
try:
response = urllib2.urlopen(query).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
print "\nTest suite #3: Control Port security"
# /save test
query='http://localhost:' + core2ControlPort + '/core2/save'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
response = opener.open(request).read()
# response = urllib2.urlopen(request).read()
#print response
response_json = json.loads(response)
if len(response_json) > 0:
if response_json['log'][0]['save'] != 'success':
failCount += 1
print "/save request did not return success"
else:
print query + ' test pass'
else:
failCount += 1
print "Null response to info request"
# /export
query='http://localhost:' + core2ControlPort + '/core2/export?exported_data_file=core2-exported.json'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
response = opener.open(request).read()
# response = urllib2.urlopen(request).read()
#print response
response_json = json.loads(response)
if len(response_json) > 0:
if response_json['log'][0]['export'] != 'success':
failCount += 1
print "/export request did not return success"
else:
print query + ' test pass'
else:
failCount += 1
print "Null response to save request"
# /resetLogger test
query='http://localhost:' + core2ControlPort + '/core2/resetLogger'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
response = opener.open(request).read()
# response = urllib2.urlopen(request).read()
#print response
response_json = json.loads(response)
if len(response_json) > 0:
if response_json['log']:
print query + ' test pass'
else:
failCount += 1
print "/resetLogger request did not return success"
else:
failCount += 1
print "Null response to resetLogger request"
# /core2/save on protected port test
query='http://localhost:' + port + '/core2/save'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
try:
response = opener.open(request).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# /core2/export on protected port test
query='http://localhost:' + port + '/core2/export?exported_data_file=core2-exported.json'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
try:
response = opener.open(request).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# /core2/resetLogger on protected port test
query='http://localhost:' + port + '/core2/resetLogger'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
try:
response = opener.open(request).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# /core2/save on protected port test
query='http://localhost:' + core1InfoPort + '/core2/save'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
try:
response = opener.open(request).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# /core2/export on protected port test
query='http://localhost:' + core1InfoPort + '/core2/export?exported_data_file=core2-exported.json'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
try:
response = opener.open(request).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
# /core2/resetLogger on protected port test
query='http://localhost:' + core1InfoPort + '/core2/resetLogger'
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(query, '')
#request.add_header('Content-Type', 'your/contenttype')
request.get_method = lambda: 'PUT'
#do the query
#print query
try:
response = opener.open(request).read()
#print response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
print "\nTest suite #4 - Port security"
f_in = open(queriesAndResultsPath, 'r')
for line in f_in:
#get the query keyword and results
value=line.split('||')
queryValue=value[0].split()
allResults=value[1].split('@')
coreNum=0
for coreResult in allResults:
resultValue=coreResult.split()
#construct the query
if coreNum == 0:
# test default core (unnamed core) on 0th iteration
query='http://localhost:' + core1InfoPort + '/search?'
else:
query='http://localhost:' + core1InfoPort + '/core' + str(coreNum) + '/search?'
query = query + prepareQuery(queryValue, False)
try:
#do the query
response = urllib2.urlopen(query).read()
#print query + ' Got ==> ' + response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
coreNum += 1
f_in.close()
f_in = open(queriesAndResultsPath, 'r')
for line in f_in:
#get the query keyword and results
value=line.split('||')
queryValue=value[0].split()
allResults=value[1].split('@')
coreNum=0
for coreResult in allResults:
resultValue=coreResult.split()
#construct the query
if coreNum == 0:
# test default core (unnamed core) on 0th iteration
query='http://localhost:' + core2ControlPort + '/search?'
else:
query='http://localhost:' + core2ControlPort + '/core' + str(coreNum) + '/search?'
query = query + prepareQuery(queryValue, False)
try:
#do the query
response = urllib2.urlopen(query).read()
#print query + ' Got ==> ' + response
response_json = json.loads(response)
except urllib2.HTTPError as err:
if err.code == 404:
print query + ' test pass'
else:
# did not get expected file not found error
failCount += 1
raise
coreNum += 1
f_in.close()
test_lib.killServer(serverHandle)
print '=============================='
return failCount
if __name__ == '__main__':
#Path of the query file
#each line like "<search-term>||<default core ids>@<core1 ids>@<core2 ids>" (see header comments above)
binary_path = sys.argv[1]
queriesAndResultsPath = sys.argv[2]
exitCode = testMultipleCores(queriesAndResultsPath, binary_path)
os._exit(exitCode)
| |
# A module to expose various thread/process/job related structures and
# methods from kernel32
#
# The MIT License
#
# Copyright (c) 2003-2004 by Peter Astrand <astrand@lysator.liu.se>
#
# Additions and modifications written by Benjamin Smedberg
# <benjamin@smedbergs.us> are Copyright (c) 2006 by the Mozilla Foundation
# <http://www.mozilla.org/>
#
# More Modifications
# Copyright (c) 2006-2007 by Mike Taylor <bear@code-bear.com>
# Copyright (c) 2007-2008 by Mikeal Rogers <mikeal@mozilla.com>
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of the
# author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from ctypes import c_void_p, POINTER, sizeof, Structure, windll, WinError, WINFUNCTYPE
from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LPCWSTR, LPWSTR, UINT, WORD
from subprocess import SW_HIDE
try:
from win32job import QueryInformationJobObject
except ImportError:
def QueryInformationJobObject(*args, **kwargs):
raise Exception("wait() has triggered a condition that requires pywin32. please install.")
print "pywin32 is not installed. Some wait() features in killableprocess may not function properly."
LPVOID = c_void_p
LPBYTE = POINTER(BYTE)
LPDWORD = POINTER(DWORD)
def ErrCheckBool(result, func, args):
"""errcheck function for Windows functions that return a BOOL True
on success"""
if not result:
raise WinError()
return args
# AutoHANDLE
class AutoHANDLE(HANDLE):
"""Subclass of HANDLE which will call CloseHandle() on deletion."""
CloseHandleProto = WINFUNCTYPE(BOOL, HANDLE)
CloseHandle = CloseHandleProto(("CloseHandle", windll.kernel32))
CloseHandle.errcheck = ErrCheckBool
def Close(self):
if self.value:
self.CloseHandle(self)
self.value = 0
def __del__(self):
self.Close()
def __int__(self):
return self.value
def ErrCheckHandle(result, func, args):
"""errcheck function for Windows functions that return a HANDLE."""
if not result:
raise WinError()
return AutoHANDLE(result)
# PROCESS_INFORMATION structure
class PROCESS_INFORMATION(Structure):
_fields_ = [("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessID", DWORD),
("dwThreadID", DWORD)]
def __init__(self):
Structure.__init__(self)
self.cb = sizeof(self)
LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
# STARTUPINFO structure
class STARTUPINFO(Structure):
_fields_ = [("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE)
]
LPSTARTUPINFO = POINTER(STARTUPINFO)
STARTF_USESHOWWINDOW = 0x01
STARTF_USESIZE = 0x02
STARTF_USEPOSITION = 0x04
STARTF_USECOUNTCHARS = 0x08
STARTF_USEFILLATTRIBUTE = 0x10
STARTF_RUNFULLSCREEN = 0x20
STARTF_FORCEONFEEDBACK = 0x40
STARTF_FORCEOFFFEEDBACK = 0x80
STARTF_USESTDHANDLES = 0x100
# EnvironmentBlock
class EnvironmentBlock:
"""An object which can be passed as the lpEnv parameter of CreateProcess.
It is initialized with a dictionary."""
def __init__(self, dict):
if not dict:
self._as_parameter_ = None
else:
values = ["%s=%s" % (key, value)
for (key, value) in dict.iteritems()]
values.append("")
self._as_parameter_ = LPCWSTR("\0".join(values))
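# Example (illustrative): EnvironmentBlock({"FOO": "bar"}) builds the NUL-separated,
# NUL-terminated "key=value" block expected by CreateProcess's lpEnvironment when
# CREATE_UNICODE_ENVIRONMENT is used.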
# CreateProcess()
CreateProcessProto = WINFUNCTYPE(BOOL, # Return type
LPCWSTR, # lpApplicationName
LPWSTR, # lpCommandLine
LPVOID, # lpProcessAttributes
LPVOID, # lpThreadAttributes
BOOL, # bInheritHandles
DWORD, # dwCreationFlags
LPVOID, # lpEnvironment
LPCWSTR, # lpCurrentDirectory
LPSTARTUPINFO, # lpStartupInfo
LPPROCESS_INFORMATION # lpProcessInformation
)
CreateProcessFlags = ((1, "lpApplicationName", None),
(1, "lpCommandLine"),
(1, "lpProcessAttributes", None),
(1, "lpThreadAttributes", None),
(1, "bInheritHandles", True),
(1, "dwCreationFlags", 0),
(1, "lpEnvironment", None),
(1, "lpCurrentDirectory", None),
(1, "lpStartupInfo"),
(2, "lpProcessInformation"))
def ErrCheckCreateProcess(result, func, args):
ErrCheckBool(result, func, args)
# return a tuple (hProcess, hThread, dwProcessID, dwThreadID)
pi = args[9]
return AutoHANDLE(pi.hProcess), AutoHANDLE(pi.hThread), pi.dwProcessID, pi.dwThreadID
CreateProcess = CreateProcessProto(("CreateProcessW", windll.kernel32),
CreateProcessFlags)
CreateProcess.errcheck = ErrCheckCreateProcess
CREATE_BREAKAWAY_FROM_JOB = 0x01000000
CREATE_DEFAULT_ERROR_MODE = 0x04000000
CREATE_NEW_CONSOLE = 0x00000010
CREATE_NEW_PROCESS_GROUP = 0x00000200
CREATE_NO_WINDOW = 0x08000000
CREATE_SUSPENDED = 0x00000004
CREATE_UNICODE_ENVIRONMENT = 0x00000400
DEBUG_ONLY_THIS_PROCESS = 0x00000002
DEBUG_PROCESS = 0x00000001
DETACHED_PROCESS = 0x00000008
# CreateJobObject()
CreateJobObjectProto = WINFUNCTYPE(HANDLE, # Return type
LPVOID, # lpJobAttributes
LPCWSTR # lpName
)
CreateJobObjectFlags = ((1, "lpJobAttributes", None),
(1, "lpName", None))
CreateJobObject = CreateJobObjectProto(("CreateJobObjectW", windll.kernel32),
CreateJobObjectFlags)
CreateJobObject.errcheck = ErrCheckHandle
# AssignProcessToJobObject()
AssignProcessToJobObjectProto = WINFUNCTYPE(BOOL, # Return type
HANDLE, # hJob
HANDLE # hProcess
)
AssignProcessToJobObjectFlags = ((1, "hJob"),
(1, "hProcess"))
AssignProcessToJobObject = AssignProcessToJobObjectProto(
("AssignProcessToJobObject", windll.kernel32),
AssignProcessToJobObjectFlags)
AssignProcessToJobObject.errcheck = ErrCheckBool
# ResumeThread()
def ErrCheckResumeThread(result, func, args):
if result == -1:
raise WinError()
return args
ResumeThreadProto = WINFUNCTYPE(DWORD, # Return type
HANDLE # hThread
)
ResumeThreadFlags = ((1, "hThread"),)
ResumeThread = ResumeThreadProto(("ResumeThread", windll.kernel32),
ResumeThreadFlags)
ResumeThread.errcheck = ErrCheckResumeThread
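# Example (illustrative, not executed here): create a suspended process inside a
# job object so the whole process tree can later be terminated together:
#   si = STARTUPINFO()
#   si.cb = sizeof(si)
#   hProcess, hThread, pid, tid = CreateProcess(
#       None, u"notepad.exe", None, None, False,
#       CREATE_SUSPENDED | CREATE_UNICODE_ENVIRONMENT, None, None, si)
#   job = CreateJobObject(None, None)
#   AssignProcessToJobObject(job, hProcess)
#   ResumeThread(hThread)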
# TerminateJobObject()
TerminateJobObjectProto = WINFUNCTYPE(BOOL, # Return type
HANDLE, # hJob
UINT # uExitCode
)
TerminateJobObjectFlags = ((1, "hJob"),
(1, "uExitCode", 127))
TerminateJobObject = TerminateJobObjectProto(
("TerminateJobObject", windll.kernel32),
TerminateJobObjectFlags)
TerminateJobObject.errcheck = ErrCheckBool
# WaitForSingleObject()
WaitForSingleObjectProto = WINFUNCTYPE(DWORD, # Return type
HANDLE, # hHandle
DWORD, # dwMilliseconds
)
WaitForSingleObjectFlags = ((1, "hHandle"),
(1, "dwMilliseconds", -1))
WaitForSingleObject = WaitForSingleObjectProto(
("WaitForSingleObject", windll.kernel32),
WaitForSingleObjectFlags)
INFINITE = -1
WAIT_TIMEOUT = 0x0102
WAIT_OBJECT_0 = 0x0
WAIT_ABANDONED = 0x0080
# GetExitCodeProcess()
GetExitCodeProcessProto = WINFUNCTYPE(BOOL, # Return type
HANDLE, # hProcess
LPDWORD, # lpExitCode
)
GetExitCodeProcessFlags = ((1, "hProcess"),
(2, "lpExitCode"))
GetExitCodeProcess = GetExitCodeProcessProto(
("GetExitCodeProcess", windll.kernel32),
GetExitCodeProcessFlags)
GetExitCodeProcess.errcheck = ErrCheckBool
| |
from gym.spaces import Box
from functools import partial
import logging
import numpy as np
import gym
from typing import Dict, Tuple, List
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.ddpg.ddpg_tf_model import DDPGTFModel
from ray.rllib.agents.ddpg.ddpg_torch_model import DDPGTorchModel
from ray.rllib.agents.ddpg.noop_model import NoopModel, TorchNoopModel
from ray.rllib.agents.dqn.dqn_tf_policy import postprocess_nstep_and_prio, PRIO_WEIGHTS
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import Deterministic, Dirichlet
from ray.rllib.models.torch.torch_action_dist import TorchDeterministic, TorchDirichlet
from ray.rllib.utils.annotations import override
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import get_variable, try_import_tf
from ray.rllib.utils.spaces.simplex import Simplex
from ray.rllib.utils.tf_utils import huber_loss, make_tf_callable
from ray.rllib.utils.typing import (
TrainerConfigDict,
TensorType,
LocalOptimizer,
ModelGradients,
)
from ray.util.debug import log_once
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
def build_ddpg_models(
policy: Policy,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> ModelV2:
if policy.config["use_state_preprocessor"]:
default_model = None # catalog decides
num_outputs = 256 # arbitrary
config["model"]["no_final_linear"] = True
else:
default_model = TorchNoopModel if config["framework"] == "torch" else NoopModel
num_outputs = int(np.product(observation_space.shape))
policy.model = ModelCatalog.get_model_v2(
obs_space=observation_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
model_interface=(
DDPGTorchModel if config["framework"] == "torch" else DDPGTFModel
),
default_model=default_model,
name="ddpg_model",
actor_hidden_activation=config["actor_hidden_activation"],
actor_hiddens=config["actor_hiddens"],
critic_hidden_activation=config["critic_hidden_activation"],
critic_hiddens=config["critic_hiddens"],
twin_q=config["twin_q"],
add_layer_norm=(
policy.config["exploration_config"].get("type") == "ParameterNoise"
),
)
policy.target_model = ModelCatalog.get_model_v2(
obs_space=observation_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
model_interface=(
DDPGTorchModel if config["framework"] == "torch" else DDPGTFModel
),
default_model=default_model,
name="target_ddpg_model",
actor_hidden_activation=config["actor_hidden_activation"],
actor_hiddens=config["actor_hiddens"],
critic_hidden_activation=config["critic_hidden_activation"],
critic_hiddens=config["critic_hiddens"],
twin_q=config["twin_q"],
add_layer_norm=(
policy.config["exploration_config"].get("type") == "ParameterNoise"
),
)
return policy.model
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: SampleBatch,
*,
explore: bool = True,
is_training: bool = False,
**kwargs
) -> Tuple[TensorType, ActionDistribution, List[TensorType]]:
model_out, _ = model(SampleBatch(obs=obs_batch, _is_training=is_training), [], None)
dist_inputs = model.get_policy_output(model_out)
if isinstance(policy.action_space, Simplex):
distr_class = (
TorchDirichlet if policy.config["framework"] == "torch" else Dirichlet
)
else:
distr_class = (
TorchDeterministic
if policy.config["framework"] == "torch"
else Deterministic
)
return dist_inputs, distr_class, [] # []=state out
def ddpg_actor_critic_loss(
policy: Policy, model: ModelV2, _, train_batch: SampleBatch
) -> TensorType:
twin_q = policy.config["twin_q"]
gamma = policy.config["gamma"]
n_step = policy.config["n_step"]
use_huber = policy.config["use_huber"]
huber_threshold = policy.config["huber_threshold"]
l2_reg = policy.config["l2_reg"]
input_dict = SampleBatch(obs=train_batch[SampleBatch.CUR_OBS], _is_training=True)
input_dict_next = SampleBatch(
obs=train_batch[SampleBatch.NEXT_OBS], _is_training=True
)
model_out_t, _ = model(input_dict, [], None)
model_out_tp1, _ = model(input_dict_next, [], None)
target_model_out_tp1, _ = policy.target_model(input_dict_next, [], None)
policy.target_q_func_vars = policy.target_model.variables()
# Policy network evaluation.
policy_t = model.get_policy_output(model_out_t)
policy_tp1 = policy.target_model.get_policy_output(target_model_out_tp1)
# Action outputs.
if policy.config["smooth_target_policy"]:
target_noise_clip = policy.config["target_noise_clip"]
clipped_normal_sample = tf.clip_by_value(
tf.random.normal(
tf.shape(policy_tp1), stddev=policy.config["target_noise"]
),
-target_noise_clip,
target_noise_clip,
)
policy_tp1_smoothed = tf.clip_by_value(
policy_tp1 + clipped_normal_sample,
policy.action_space.low * tf.ones_like(policy_tp1),
policy.action_space.high * tf.ones_like(policy_tp1),
)
else:
# No smoothing, just use deterministic actions.
policy_tp1_smoothed = policy_tp1
# Q-net(s) evaluation.
# prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
# Q-values for given actions & observations in given current state
q_t = model.get_q_values(model_out_t, train_batch[SampleBatch.ACTIONS])
# Q-values for current policy (no noise) in given current state
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if twin_q:
twin_q_t = model.get_twin_q_values(
model_out_t, train_batch[SampleBatch.ACTIONS]
)
# Target q-net(s) evaluation.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1_smoothed)
if twin_q:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1_smoothed
)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if twin_q:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (
1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)
) * q_tp1_best
# Compute RHS of bellman equation.
q_t_selected_target = tf.stop_gradient(
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32)
+ gamma ** n_step * q_tp1_best_masked
)
# Compute the error (potentially clipped).
if twin_q:
td_error = q_t_selected - q_t_selected_target
twin_td_error = twin_q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold) + huber_loss(
twin_td_error, huber_threshold
)
else:
errors = 0.5 * tf.math.square(td_error) + 0.5 * tf.math.square(
twin_td_error
)
else:
td_error = q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold)
else:
errors = 0.5 * tf.math.square(td_error)
critic_loss = tf.reduce_mean(
tf.cast(train_batch[PRIO_WEIGHTS], tf.float32) * errors
)
actor_loss = -tf.reduce_mean(q_t_det_policy)
# Add l2-regularization if required.
if l2_reg is not None:
for var in policy.model.policy_variables():
if "bias" not in var.name:
actor_loss += l2_reg * tf.nn.l2_loss(var)
for var in policy.model.q_variables():
if "bias" not in var.name:
critic_loss += l2_reg * tf.nn.l2_loss(var)
# Model self-supervised losses.
if policy.config["use_state_preprocessor"]:
# Expand input_dict in case custom_loss' need them.
input_dict[SampleBatch.ACTIONS] = train_batch[SampleBatch.ACTIONS]
input_dict[SampleBatch.REWARDS] = train_batch[SampleBatch.REWARDS]
input_dict[SampleBatch.DONES] = train_batch[SampleBatch.DONES]
input_dict[SampleBatch.NEXT_OBS] = train_batch[SampleBatch.NEXT_OBS]
if log_once("ddpg_custom_loss"):
logger.warning(
"You are using a state-preprocessor with DDPG and "
"therefore, `custom_loss` will be called on your Model! "
"Please be aware that DDPG now uses the ModelV2 API, which "
"merges all previously separate sub-models (policy_model, "
"q_model, and twin_q_model) into one ModelV2, on which "
"`custom_loss` is called, passing it "
"[actor_loss, critic_loss] as 1st argument. "
"You may have to change your custom loss function to handle "
"this."
)
[actor_loss, critic_loss] = model.custom_loss(
[actor_loss, critic_loss], input_dict
)
# Store values for stats function.
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.td_error = td_error
policy.q_t = q_t
# Return one loss value (even though we treat them separately in our
# 2 optimizers: actor and critic).
return policy.critic_loss + policy.actor_loss
def build_apply_op(
policy: Policy, optimizer: LocalOptimizer, grads_and_vars: ModelGradients
) -> TensorType:
    # Delayed policy updates: apply the actor update only once for every
    # `policy_delay` critic updates.
should_apply_actor_opt = tf.equal(
tf.math.floormod(policy.global_step, policy.config["policy_delay"]), 0
)
def make_apply_op():
return policy._actor_optimizer.apply_gradients(policy._actor_grads_and_vars)
actor_op = tf.cond(
should_apply_actor_opt, true_fn=make_apply_op, false_fn=lambda: tf.no_op()
)
critic_op = policy._critic_optimizer.apply_gradients(policy._critic_grads_and_vars)
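    # In eager mode the two `apply_gradients` calls above have already executed;
    # in graph mode the resulting ops are grouped below behind a global-step
    # increment.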
# Increment global step & apply ops.
if policy.config["framework"] in ["tf2", "tfe"]:
policy.global_step.assign_add(1)
return tf.no_op()
else:
with tf1.control_dependencies([tf1.assign_add(policy.global_step, 1)]):
return tf.group(actor_op, critic_op)
def gradients_fn(
policy: Policy, optimizer: LocalOptimizer, loss: TensorType
) -> ModelGradients:
if policy.config["framework"] in ["tf2", "tfe"]:
tape = optimizer.tape
pol_weights = policy.model.policy_variables()
actor_grads_and_vars = list(
zip(tape.gradient(policy.actor_loss, pol_weights), pol_weights)
)
q_weights = policy.model.q_variables()
critic_grads_and_vars = list(
zip(tape.gradient(policy.critic_loss, q_weights), q_weights)
)
else:
actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
policy.actor_loss, var_list=policy.model.policy_variables()
)
critic_grads_and_vars = policy._critic_optimizer.compute_gradients(
policy.critic_loss, var_list=policy.model.q_variables()
)
# Clip if necessary.
if policy.config["grad_clip"]:
clip_func = partial(tf.clip_by_norm, clip_norm=policy.config["grad_clip"])
else:
clip_func = tf.identity
# Save grads and vars for later use in `build_apply_op`.
policy._actor_grads_and_vars = [
(clip_func(g), v) for (g, v) in actor_grads_and_vars if g is not None
]
policy._critic_grads_and_vars = [
(clip_func(g), v) for (g, v) in critic_grads_and_vars if g is not None
]
grads_and_vars = policy._actor_grads_and_vars + policy._critic_grads_and_vars
return grads_and_vars
def build_ddpg_stats(policy: Policy, batch: SampleBatch) -> Dict[str, TensorType]:
stats = {
"mean_q": tf.reduce_mean(policy.q_t),
"max_q": tf.reduce_max(policy.q_t),
"min_q": tf.reduce_min(policy.q_t),
}
return stats
class ActorCriticOptimizerMixin:
"""Mixin class to generate the necessary optimizers for actor-critic algos.
- Creates global step for counting the number of update operations.
    - Creates separate optimizers for actor and critic.
"""
def __init__(self, config):
# Eager mode.
if config["framework"] in ["tf2", "tfe"]:
self.global_step = get_variable(0, tf_name="global_step")
self._actor_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["actor_lr"]
)
self._critic_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["critic_lr"]
)
# Static graph mode.
else:
self.global_step = tf1.train.get_or_create_global_step()
self._actor_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["actor_lr"]
)
self._critic_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["critic_lr"]
)
def setup_early_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> None:
"""Call mixin classes' constructors before Policy's initialization.
Adds the necessary optimizers to the given Policy.
Args:
policy (Policy): The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config (TrainerConfigDict): The Policy's config.
"""
ActorCriticOptimizerMixin.__init__(policy, config)
class ComputeTDErrorMixin:
def __init__(self, loss_fn):
@make_tf_callable(self.get_session(), dynamic_shape=True)
def compute_td_error(
obs_t, act_t, rew_t, obs_tp1, done_mask, importance_weights
):
# Do forward pass on loss to update td errors attribute
# (one TD-error value per item in batch to update PR weights).
loss_fn(
self,
self.model,
None,
{
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
SampleBatch.DONES: tf.convert_to_tensor(done_mask),
PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
},
)
# `self.td_error` is set in loss_fn.
return self.td_error
self.compute_td_error = compute_td_error
def setup_mid_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> None:
ComputeTDErrorMixin.__init__(policy, ddpg_actor_critic_loss)
class TargetNetworkMixin:
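    """Adds a (Polyak-averaged) target-network update op to the policy."""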
def __init__(self, config: TrainerConfigDict):
@make_tf_callable(self.get_session())
def update_target_fn(tau):
tau = tf.convert_to_tensor(tau, dtype=tf.float32)
update_target_expr = []
model_vars = self.model.trainable_variables()
target_model_vars = self.target_model.trainable_variables()
assert len(model_vars) == len(target_model_vars), (
model_vars,
target_model_vars,
)
for var, var_target in zip(model_vars, target_model_vars):
update_target_expr.append(
var_target.assign(tau * var + (1.0 - tau) * var_target)
)
logger.debug("Update target op {}".format(var_target))
return tf.group(*update_target_expr)
# Hard initial update.
self._do_update = update_target_fn
self.update_target(tau=1.0)
# Support both hard and soft sync.
    def update_target(self, tau: float = None) -> None:
self._do_update(np.float32(tau or self.config.get("tau")))
@override(TFPolicy)
def variables(self) -> List[TensorType]:
return self.model.variables() + self.target_model.variables()
def setup_late_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> None:
TargetNetworkMixin.__init__(policy, config)
def validate_spaces(
policy: Policy,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
) -> None:
if not isinstance(action_space, Box):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"DDPG.".format(action_space, policy)
)
elif len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, policy, action_space.shape)
+ "Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API."
)
DDPGTFPolicy = build_tf_policy(
name="DDPGTFPolicy",
get_default_config=lambda: ray.rllib.agents.ddpg.ddpg.DEFAULT_CONFIG,
make_model=build_ddpg_models,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=ddpg_actor_critic_loss,
stats_fn=build_ddpg_stats,
postprocess_fn=postprocess_nstep_and_prio,
compute_gradients_fn=gradients_fn,
apply_gradients_fn=build_apply_op,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
validate_spaces=validate_spaces,
before_init=setup_early_mixins,
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
mixins=[
TargetNetworkMixin,
ActorCriticOptimizerMixin,
ComputeTDErrorMixin,
],
)
| |
import pyjd
from pyjamas import log
import browser
import gdk
#import lxml.etree
# WINDOW TYPES
WINDOW_TOPLEVEL = 1
# GTK OPTIONS (flags)
EXPAND = 1
FILL = 2
# GTK WIDGET FLAGS
TOPLEVEL = 1
NO_WINDOW = 2
REALIZED = 4
MAPPED = 8
VISIBLE = 16
SENSITIVE = 32
PARENT_SENSITIVE = 64
CAN_FOCUS = 128
HAS_FOCUS = 256
CAN_DEFAULT = 512
HAS_DEFAULT = 1024
HAS_GRAB = 2048
RC_STYLE = 4096
COMPOSITE_CHILD = 8192
NO_REPARENT = 16384
APP_PAINTABLE = 32768
RECEIVES_DEFAULT = 65536
DOUBLE_BUFFERED = 131072
# GTK Update Type Constants
UPDATE_CONTINUOUS = 1
UPDATE_DISCONTINUOUS = 2
UPDATE_DELAYED = 4
# GTK Position Type Constants
POS_LEFT = 1
POS_RIGHT = 2
POS_TOP = 4
POS_BOTTOM = 8
class GObject:
def __init__(self):
self.callbacks = {}
self.connections = 0
def connect(self, detailed_signal, handler, *data):
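        # Register the handler (plus any extra arguments) under the normalized
        # signal name; the returned integer acts as the handler id.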
detailed_signal = detailed_signal.replace('_', '-')
l = self.callbacks.setdefault(detailed_signal,[])
l.append((handler, data))
self.connections += 1
return self.connections
def connect_object(self, detailed_signal, handler, gobject, data=None):
detailed_signal = detailed_signal.replace('_', '-')
def inner(widget, data):
if handler.func_code.co_argcount == 1:
handler(widget)
else:
handler(widget, data)
self.connect(detailed_signal, inner, data)
def emit(self, detailed_signal, *args):
detailed_signal = detailed_signal.replace('_', '-')
        if detailed_signal in self.callbacks:
for pair in self.callbacks[detailed_signal]:
if pair[1]:
pair[0](self, *pair[1])
else:
pair[0](self)
def dom_event(self, event, element):
pass
class Object(GObject):
def __init__(self):
GObject.__init__(self)
self.flags = 0
def set_flags(self, flags):
self.flags = flags
class Widget(Object):
def __init__(self):
Object.__init__(self)
self._visible = False
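        # Every widget is backed by an absolutely positioned, initially hidden
        # <div> that the layout code positions and sizes explicitly.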
self.widget_cont = browser.Element('div')
self.widget_cont.setStyle('visibility', 'hidden')
self.widget_cont.setStyle('position', 'absolute')
self.widget_cont.setStyle('overflow', 'hidden')
self.minheight = 1
self.minwidth = 1
self.widget_cont.setPxStyle('minHeight', self.minheight)
self.widget_cont.setPxStyle('minWidth', self.minwidth)
self.margin = 0
self.widget_cont.setPxStyle('margin', self.margin)
self._parent = None
def get_allocation(self):
x = self.widget_cont.getX()
y = self.widget_cont.getY()
w = self.widget_cont.getWidth()
h = self.widget_cont.getHeight()
return gdk.Rectangle(x, y, w, h)
def show(self):
self._visible = True
self.widget_cont.setStyle('visibility', 'visible')
self._redraw()
def hide(self):
self._visible = False
self.widget_cont.setStyle('visibility', 'hidden')
def show_all(self):
self.show()
def hide_all(self):
self.hide()
def destroy(self):
self.emit('destroy')
def grab_default(self):
pass #TODO
def set_size_request(self, width, height):
pass
def _redraw(self):
container = self.widget_cont
container.setPxStyle('minHeight', self.minheight)
container.setPxStyle('minWidth', self.minwidth)
container.setPxStyle('margin', self.margin)
class Entry(Widget):
def __init__(self):
Widget.__init__(self)
self.widget_int = browser.Document.createElement('input')
self.widget_cont.append(self.widget_int)
class Container(Widget):
def __init__(self):
Widget.__init__(self)
self.children = []
self.widget_int = browser.Document.createElement('div')
self.widget_int.setStyle('position', 'absolute')
self.widget_cont.append(self.widget_int)
def add(self, child):
if self._visible: child.show()
child._parent = self
self.children.append(child)
self.widget_int.append(child.widget_cont)
self.minwidth += child.minwidth
self.minheight += child.minheight
def set_border_width(self, border_width):
self.margin = border_width
self._redraw()
def get_border_width(self):
return self.margin
def _redraw(self):
Widget._redraw(self)
container = self.widget_cont
container.setPxStyle('width', container.getWidth())
container.setPxStyle('height', container.getHeight())
self.minwidth = 2 * self.margin
self.minheight = 2 * self.margin
for child in self.children:
child._redraw()
if len(self.children) == 1:
self.minwidth += self.children[0].minwidth
self.minheight += self.children[0].minheight
container.setPxStyle('minHeight', self.minheight)
container.setPxStyle('minWidth', self.minwidth)
def show_all(self):
for child in self.children:
child.show_all()
Widget.show_all(self)
def hide_all(self):
for child in self.children:
child.hide_all()
Widget.hide_all(self)
def child_set_property(self, child, prop, value):
setattr(child, prop, value)
class Bin(Container):
def __init__(self):
Container.__init__(self)
def get_child(self):
if len(self.children)>0:
return self.children[0]
else:
return None
def add(self, child):
if len(self.children)>0:
pass #TODO: GtkWarning !!!
Container.add(self, child)
class Table(Container):
def __init__(self, rows=1, columns=1, homogeneous=False):
Container.__init__(self)
self.rows = rows
self.columns = columns
self.vert_inc = 100.0/rows
self.horitz_inc = 100.0/columns
def attach(self, child, left_attach, right_attach,
top_attach, bottom_attach, xoptions=None,
yoptions=None, xpadding=0, ypadding=0):
if xoptions is None:
xoptions = EXPAND | FILL
if yoptions is None:
yoptions = EXPAND | FILL
Container.add(self, child)
child_container = child.widget_cont
child_container.setPercentStyle('left', left_attach * self.horitz_inc)
child_container.setPercentStyle('right',
100 - right_attach * self.horitz_inc)
child_container.setPercentStyle('top', top_attach * self.vert_inc)
child_container.setPercentStyle('bottom',
100 - bottom_attach * self.vert_inc)
class Box(Container):
def __init__(self):
Container.__init__(self)
def _add_element(self, element):
Container.add(self, element)
def pack_start(self, child, expand=True, fill=True, padding=0):
child.expand = expand
child.fill = fill
child.padding = padding
self._add_element(child)
def add(self, child):
child.expand = True
child.fill = True
child.padding = 0
self._add_element(child)
class HBox(Box):
def __init__(self, homogeneous=False, spacing=0):
Box.__init__(self)
self.homogeneous = homogeneous
self.spacing = spacing
def _add_element(self, element):
Box._add_element(self, element)
element.widget_cont.setPxStyle('height', self.widget_cont.getHeight() -
2 * self.margin)
self._redraw()
def _redraw(self):
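        # Horizontal layout: non-expanding children keep their minimum width
        # (plus spacing/padding/margins); the remaining width is split evenly
        # among the expanding children.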
Box._redraw(self)
count = 0
fix_width = 0
if not self.homogeneous:
for child in self.children:
if child.expand:
count += 1
else:
fix_width += child.minwidth + self.spacing + \
child.padding + 2 * child.margin
else:
count = len(self.children)
container = self.widget_cont
left = self.margin
for child in self.children:
if len(self.children) != 1:
if child.minheight + 2 * self.margin > self.minheight:
self.minheight = child.minheight + 2 * self.margin
self.minwidth += child.minwidth + 2 * child.margin + \
self.spacing + child.padding
container.setPxStyle('minHeight', self.minheight)
container.setPxStyle('minWidth', self.minwidth)
count = max(count, 1)
horiz_inc = (container.getWidth() - 2*self.margin - fix_width) / count
for child in self.children:
child_container = child.widget_cont
child_container.setPxStyle('height',
container.getHeight() - 2 * self.margin)
child_container.setPxStyle('left', left + self.spacing / 2 +
child.padding / 2)
if child.expand:
left += horiz_inc
else:
left += child.minwidth + 2 * child.margin + self.spacing + \
child.padding
right = container.getWidth() - self.margin - left
right = max(right, self.margin)
child_container.setPxStyle('right', right + self.spacing / 2 +
child.padding / 2)
child._redraw()
class VBox(Box):
def __init__(self, homogeneous=False, spacing=0):
Box.__init__(self)
self.homogeneous = homogeneous
self.spacing = spacing
def _add_element(self, element):
Box._add_element(self, element)
element.widget_cont.setPxStyle('width', self.widget_cont.getWidth() -
2 * self.margin)
self._redraw()
def _redraw(self):
Box._redraw(self)
count = 0
fix_height = 0
if not self.homogeneous:
for child in self.children:
if child.expand:
count += 1
else:
fix_height += child.minheight + self.spacing + \
child.padding + 2 * child.margin
else:
count = len(self.children)
top = self.margin
for child in self.children:
if len(self.children) != 1:
if child.minwidth + 2 * self.margin > self.minwidth:
self.minwidth = child.minwidth + 2 * self.margin
self.minheight += child.minheight + 2 * child.margin + \
self.spacing + child.padding
self.widget_cont.setPxStyle('minHeight', self.minheight)
self.widget_cont.setPxStyle('minWidth', self.minwidth)
count = max(count, 1)
vert_inc = (self.widget_cont.getHeight() - 2 * self.margin -
fix_height) / count
for child in self.children:
child.widget_cont.setPxStyle('width', self.widget_cont.getWidth() -
2 * self.margin)
child.widget_cont.setPxStyle('top', top + self.spacing / 2 +
child.padding / 2)
if child.expand:
top += vert_inc
else:
top += child.minheight + 2 * child.margin + self.spacing + \
child.padding
bottom = self.widget_cont.getHeight() - self.margin - top
bottom = max(bottom, self.margin)
child.widget_cont.setPxStyle('bottom', bottom + self.spacing / 2 +
child.padding / 2)
child._redraw()
class Window(Bin):
def __init__(self, type=WINDOW_TOPLEVEL):
Bin.__init__(self)
browser.Document.window.catchEvents(['resize'], self)
self.type = type
self.title = ''
self.child = None
# XXX using a tuple here isn't supported yet
for style in ['top', 'bottom', 'right', 'left']:
self.widget_cont.setPxStyle(style, 0)
if self.type == WINDOW_TOPLEVEL:
browser.Document.append(self.widget_cont)
else:
pass #TODO: Create pop-up
def add(self, child):
Bin.add(self, child)
child.widget_cont.setPxStyle('width', self.widget_cont.getWidth())
child.widget_cont.setPxStyle('height', self.widget_cont.getHeight())
self.child = child
def set_title(self, title):
self.title = title
if self.type == WINDOW_TOPLEVEL:
browser.Document.setTitle(title)
else:
pass #TODO
def show(self):
self._redraw()
Bin.show(self)
def _redraw(self):
if self.child:
self.child.widget_cont.setPxStyle('width',
self.widget_cont.getWidth())
self.child.widget_cont.setPxStyle('height',
self.widget_cont.getHeight())
Bin._redraw(self)
def dom_event(self, event, element):
if event.type in ['resize']:
self._redraw()
class Button(Bin):
def __init__(self, label=None):
Bin.__init__(self)
self.widget_cont.catchEvents(['click'], self)
self.child = None
if label is not None:
self.add(Label(label))
self.widget_int.setStyle('textAlign', 'center')
self.widget_int.setProperty('className', 'button')
self.minheight = 25
self.minwidth = 20
def add(self, child):
Bin.add(self, child)
self.child = child
self._redraw()
def _redraw(self):
Bin._redraw(self)
self.minheight += self.child.minheight
self.minwidth += self.child.minwidth
width = self.widget_cont.getWidth()
width = max(width, self.minwidth + 2)
height = self.widget_cont.getHeight()
height = max(height, self.minheight + 2)
self.widget_int.setPxStyle('width', width - 2)
self.widget_int.setPxStyle('height', height - 2)
self.child.widget_cont.setPxStyle('width', width)
self.child.widget_cont.setPxStyle('height', height)
self.child._redraw()
def dom_event(self, event, element):
if event.type == 'click':
self.emit('clicked')
class ToggleButton(Button):
def __init__(self, label=None):
Button.__init__(self, label)
self.connect("toggled", self.toggled)
self.istoggled = False
self.widget_int.setProperty('className', 'togglebutton')
def toggled(self, widget, event=None, data=None):
self.istoggled = not self.istoggled
if self.istoggled:
self.widget_int.setProperty('className', 'togglebutton-toggled')
else:
self.widget_int.setProperty('className', 'togglebutton')
def set_active(self, is_active):
if (is_active and not self.istoggled) or \
(not is_active and self.istoggled):
self.emit('toggled')
def get_active(self):
return self.istoggled
def dom_event(self, event, element):
if event.type == 'click':
self.emit('toggled')
class CheckButton(ToggleButton):
def __init__(self, label=None):
ToggleButton.__init__(self)
self.check = browser.Element('input')
self.check.setStyle('position', 'absolute')
self.check.setStyle('width', 'auto')
self.check.setStyle('height', 'auto')
self.check.setPxStyle('left', 0)
self.check.setProperty('type', 'checkbox')
self.check_widget = Widget()
self.check_widget.widget_cont.append(self.check)
self.check_widget.show()
self.box = HBox(spacing=6)
self.box.show()
self.box.pack_start(self.check_widget, False)
self.add(self.box)
if label is not None:
self.label = Label(label)
self.box.pack_start(self.label, False)
self.widget_int.setProperty('className', 'checkbutton')
def add(self, child):
#TODO Check that no more than one widget is added.
ToggleButton.add(self, child)
def toggled(self, widget, event=None, data=None):
self.istoggled = not self.istoggled
if self.istoggled:
self.check.setProperty('checked', "1")
else:
self.check.setProperty('checked', None)
def _redraw(self):
ToggleButton._redraw(self)
# XXX what's this half pixel in 2.5?
self.check.setPxStyle('top',
self.check_widget.widget_cont.getHeight() / 2 -
self.check.getHeight() / 2 - 2.5)
self.check_widget.minwidth = self.check.getWidth() + 2
self.check_widget.minheight = self.check.getHeight() + 2
self.check_widget._redraw()
class RadioButton(CheckButton):
counter = 0
groups = {}
running = False
def __init__(self, group=None, label=None):
CheckButton.__init__(self, label)
self.check.setProperty('type', 'radio')
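        # Radio buttons are tracked in a class-level group registry so that
        # toggling one button untoggles the others in the same group.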
if group is None:
self.group = RadioButton.counter
RadioButton.counter += 1
RadioButton.groups[self.group] = [self]
else:
self.group = group.group
RadioButton.groups[self.group].append(self)
def toggled(self, widget, event=None, data=None):
if RadioButton.running:
return
RadioButton.running = True
for b in RadioButton.groups[self.group]:
if b.istoggled:
b.check.setProperty('checked', False)
b.istoggled = False
b.emit('toggled')
self.check.setProperty('checked', True)
self.istoggled = True
RadioButton.running = False
class Misc(Widget):
def __init__(self):
Widget.__init__(self)
self.xalign = 0.5
self.yalign = 0.5
def set_alignment(self, xalign, yalign):
self.xalign = xalign
self.yalign = yalign
def get_alignment(self):
return (self.xalign, self.yalign)
class Image(Misc):
def __init__(self):
Misc.__init__(self)
self.img = browser.Element('img')
self.img.setStyle('position', 'absolute')
self.img.setStyle('width', 'auto')
self.img.setStyle('height', 'auto')
self.widget_cont.append(self.img)
self.widget_cont.setProperty('className', 'image')
def set_from_file(self, filename):
self.img.setProperty('src', filename)
def _redraw(self):
Misc._redraw(self)
self.img.setPxStyle('top', (self.widget_cont.getHeight() -
self.img.getHeight()) * self.yalign)
self.img.setPxStyle('left', (self.widget_cont.getWidth() -
self.img.getWidth()) * self.xalign)
self.minwidth = self.img.getWidth()
self.minheight = self.img.getHeight()
class Label(Misc):
def __init__(self, str=None):
Misc.__init__(self)
self.label = browser.Element('div')
self.label.setStyle('position', 'absolute')
self.label.setStyle('width', 'auto')
self.label.setStyle('height', 'auto')
self.label.setStyle('whiteSpace', 'nowrap')
self.label.setHTML(str)
self.widget_cont.append(self.label)
self.widget_cont.setStyle('visibility', 'visible')
self.widget_cont.setProperty('className', 'label')
def set_text(self, str):
self.label.setHTML(str)
def get_text(self):
return self.label.getHTML()
def _redraw(self):
Misc._redraw(self)
self.label.setPxStyle('top', (self.widget_cont.getHeight() -
self.label.getHeight()) * self.yalign)
self.label.setPxStyle('left', (self.widget_cont.getWidth() -
self.label.getWidth()) * self.xalign)
self.minwidth = self.label.getWidth()
self.minheight = self.label.getHeight()
class Separator(Widget):
def __init__(self):
Widget.__init__(self)
class HSeparator(Separator):
def __init__(self):
Separator.__init__(self)
self.separator = browser.Element('hr')
self.widget_cont.append(self.separator)
self.widget_cont.setProperty('className', 'hseparator')
self.minheight = 10
class Adjustment(Object):
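    """Holds a bounded numeric value shared by Range widgets (GTK Adjustment analogue)."""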
def __init__(self, value=0, lower=0, upper=0, step_incr=0, page_incr=0,
page_size=0):
Object.__init__(self)
self.value = value
self.lower = lower
self.upper = upper
self.step_incr = step_incr
self.page_incr = page_incr
self.page_size = page_size
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
if self.value < self.lower:
self.value = self.lower
if self.value > self.upper:
self.value = self.upper
self.emit('value-changed')
def changed(self):
self.emit('changed')
class Range(Widget):
def __init__(self, adjustment=None):
Widget.__init__(self)
self.value = browser.Element('div')
self.value.setStyle('position', 'absolute')
self.widget_cont.append(self.value)
if adjustment is not None:
self.adjustment = adjustment
else:
self.adjustment = Adjustment()
self.adjustment.connect('value-changed', self._adjustment_value_changed)
self.adjustment.connect('changed', self._adjustment_changed)
self.value.setHTML(str(self.adjustment.get_value()))
def set_update_policy(self, policy):
pass
def _adjustment_value_changed(self, sender):
self.value.setHTML(str(self.adjustment.get_value()))
def _adjustment_changed(self):
self._redraw()
class Scale(Range):
def __init__(self, adjustment=None):
Range.__init__(self, adjustment)
self.line = browser.Element('div')
self.line.setStyle('position', 'absolute')
self.line.setProperty('className', 'scale')
self.line.catchEvents(['click'], self)
self.cursor = browser.Element('div')
self.cursor.setStyle('position', 'absolute')
self.cursor.setProperty('className', 'scale-cursor')
self.widget_cont.append(self.line)
self.mouseover = False
self.value_pos = POS_TOP
self.draw_value = True
self.digits = 1
self.cursor.catchEvents(['mousedown'], self)
browser.Document.document.catchEvents(['mousemove'], self)
browser.Document.document.catchEvents(['mouseup'], self)
def set_digits(self, digits):
self.digits = digits
self._redraw()
def set_draw_value(self, draw_value):
self.draw_value = draw_value
self._redraw()
def get_draw_value(self):
return self.draw_value
def set_value_pos(self, pos):
self.value_pos = pos
self._redraw()
def get_value_pos(self):
return self.value_pos
def _adjustment_value_changed(self, sender):
value = self.adjustment.get_value()
value = browser.round_val(value, self.digits)
self.value.setHTML(str(value))
def _move_cursor(self, event):
pass
def dom_event(self, event, element):
if event.type == 'mousedown':
self.mouseover = True
elif event.type == 'mousemove' and self.mouseover:
self._move_cursor(event)
elif event.type == 'click':
self._move_cursor(event)
elif event.type == 'mouseup':
self.mouseover = False
class VScale(Scale):
def __init__(self, adjustment=None):
Scale.__init__(self, adjustment)
self.line.setPxStyle('width', 15)
self.cursor.setPxStyle('height', 30)
self.cursor.setPxStyle('width', 13)
self.line.append(self.cursor)
self.minwidth = 30
self.minheight = 60
def _redraw(self):
Scale._redraw(self)
# define shortcuts
container = self.widget_cont
line = self.line
value = self.value
# geometric properties
container_width = container.getWidth()
container_height = container.getHeight()
line_width = self.line.getWidth()
value_width = self.value.getWidth()
value_height = self.value.getHeight()
if not self.draw_value:
line.setPxStyle('left', (container_width - line_width) / 2)
line.setPxStyle('top', 0)
line.setPxStyle('height', container_height - 2)
value.setStyle('visibility', 'hidden')
else:
value.setStyle('visibility', 'visible')
if self.value_pos == POS_TOP:
value.setPxStyle('left', (container_width - value_width) / 2)
value.setPxStyle('top', 0)
line.setPxStyle('left', (container_width - line_width) / 2)
line.setPxStyle('top', value_height + 2)
line.setPxStyle('height', container_height - value_height - 4)
elif self.value_pos == POS_LEFT:
value.setPxStyle('left', container_width / 2 -
(value_width + line_width) / 2)
line.setPxStyle('left', container_width / 2 +
(value_width + line_width) / 2)
line.setPxStyle('top', 0)
line.setPxStyle('height', container_height - 2)
elif self.value_pos == POS_RIGHT:
value.setPxStyle('left', container_width / 2 +
(value_width + line_width) / 2)
line.setPxStyle('left', container_width / 2 -
(value_width + line_width) / 2)
line.setPxStyle('top', 0)
line.setPxStyle('height', container_height - 2)
else:
value.setPxStyle('left', (container_width - value_width) / 2)
value.setPxStyle('top', container_height - value_height)
line.setPxStyle('left', (container_width - line_width) / 2)
line.setPxStyle('top', 0)
line.setPxStyle('height', container_height - value_height - 4)
self._adjustment_value_changed(self)
def _move_cursor(self, event):
Scale._move_cursor(self, event)
y = event.clientY - self.line.getY() - self.cursor.getHeight() / 2
y = max(y, 0)
y = min(y, self.line.getHeight() - self.cursor.getHeight() - 2)
value = (y / (self.line.getHeight() - self.cursor.getHeight() - 2)) * \
(self.adjustment.upper - self.adjustment.page_size)
if self.draw_value:
value = round(value, self.digits)
if event.type == 'click':
old_value = self.adjustment.get_value()
incr = self.adjustment.page_incr
if value > old_value:
self.adjustment.set_value(old_value + incr)
elif value < old_value:
self.adjustment.set_value(old_value - incr)
else:
self.adjustment.set_value(value)
def _adjustment_value_changed(self, sender):
Scale._adjustment_value_changed(self, self)
value = self.adjustment.get_value()
if self.draw_value:
value = round(value, self.digits)
y = (value - self.adjustment.lower) / \
(self.adjustment.upper - self.adjustment.page_size) * \
(self.line.getHeight() - self.cursor.getHeight() - 2)
self.cursor.setPxStyle('top', y)
if self.value_pos in (POS_LEFT, POS_RIGHT):
pos = y - self.value.getHeight() / 2 + self.cursor.getHeight() / 2
self.value.setPxStyle('top', pos)
class HScale(Scale):
def __init__(self, adjustment=None):
Scale.__init__(self, adjustment)
self.line.setPxStyle('height', 15)
self.cursor.setPxStyle('height', 13)
self.cursor.setPxStyle('width', 30)
self.line.append(self.cursor)
self.minwidth = 60
self.minheight = 37
def _redraw(self):
Scale._redraw(self)
# define shortcuts
container = self.widget_cont
line = self.line
value = self.value
# geometric properties
container_width = container.getWidth()
container_height = container.getHeight()
line_width = self.line.getWidth()
line_height = self.line.getHeight()
value_width = self.value.getWidth()
value_height = self.value.getHeight()
if not self.draw_value:
line.setPxStyle('top', (container_height - line_height) / 2)
line.setPxStyle('width', container_width - 2)
value.setStyle('visibility', 'hidden')
else:
self.value.setStyle('visibility', 'visible')
if self.value_pos == POS_TOP:
value.setPxStyle('top', container_height / 2 -
(line_height + value_height + 2) / 2)
line.setPxStyle('left', 0)
line.setPxStyle('top', container_height / 2 + line_height / 2 -
value_height / 2 + 1)
line.setPxStyle('width', container_width - 2)
elif self.value_pos == POS_LEFT:
value.setPxStyle('left', 0)
value.setPxStyle('top', (container_height - value_height) / 2)
line.setPxStyle('left', value_width + 2)
line.setPxStyle('top', (container_height - line_height) / 2)
line.setPxStyle('width', container_width - (value_width+2) - 2)
elif self.value_pos == POS_RIGHT:
value.setPxStyle('left', container_width - value_width)
value.setPxStyle('top', (container_height - value_height) / 2)
line.setPxStyle('left', 0)
line.setPxStyle('top', (container_height - line_height) / 2)
line.setPxStyle('width', container_width - (value_width+2) - 2)
else:
value.setPxStyle('top', container_height / 2 + line_height / 2 -
value_height / 2 + 1)
line.setPxStyle('left', 0)
line.setPxStyle('top', container_height / 2 -
(line_height + value_height + 2) / 2)
line.setPxStyle('width', container_width - 2)
self._adjustment_value_changed(self)
def _move_cursor(self, event):
Scale._move_cursor(self, event)
x = event.clientX - self.line.getX() - self.cursor.getWidth() / 2
x = max(x, 0)
x = min(x, self.line.getWidth() - self.cursor.getWidth() - 2)
value = (x / (self.line.getWidth() - self.cursor.getWidth() - 2)) * \
(self.adjustment.upper - self.adjustment.page_size)
if self.draw_value:
value = round(value, self.digits)
if event.type == 'click':
old_value = self.adjustment.get_value()
incr = self.adjustment.page_incr
if value > old_value:
self.adjustment.set_value(old_value + incr)
elif value < old_value:
self.adjustment.set_value(old_value - incr)
else:
self.adjustment.set_value(value)
def _adjustment_value_changed(self, sender):
Scale._adjustment_value_changed(self, self)
value = self.adjustment.get_value()
if self.draw_value:
value = round(value, self.digits)
x = (value - self.adjustment.lower) / \
(self.adjustment.upper - self.adjustment.page_size) * \
(self.line.getWidth() - self.cursor.getWidth() - 2)
self.cursor.setPxStyle('left', x)
if self.value_pos in (POS_TOP, POS_BOTTOM):
pos = x - self.value.getWidth() / 2 + self.cursor.getWidth() / 2
pos = max(pos, 0)
pos = min(pos, self.line.getWidth() - self.value.getWidth())
self.value.setPxStyle('left', pos)
class Scrollbar(Range):
def __init__(self, adjustment=None):
Range.__init__(self, adjustment)
# assign instance variables
self.down_arrow = browser.Element('div')
self.up_arrow = browser.Element('div')
# shortcuts
down_arrow = self.down_arrow
up_arrow = self.up_arrow
# use them
down_arrow.setStyle('position', 'absolute')
down_arrow.setPxStyle('height', 15)
down_arrow.setPxStyle('width', 15)
down_arrow.catchEvents(['click'], self)
up_arrow.setStyle('position', 'absolute')
up_arrow.setPxStyle('height', 15)
up_arrow.setPxStyle('width', 15)
up_arrow.catchEvents(['click'], self)
self.line = browser.Element('div')
self.line.setStyle('position', 'absolute')
self.line.setProperty('className', 'scrollbar')
self.line.catchEvents(['click'], self)
self.cursor = browser.Element('div')
self.cursor.setStyle('position', 'absolute')
self.cursor.setProperty('className', 'scrollbar-cursor')
self.widget_cont.append(down_arrow)
self.widget_cont.append(self.line)
self.widget_cont.append(up_arrow)
self.mouseover = False
self.cursor.catchEvents(['mousedown'], self)
self.value.setStyle('visibility', 'hidden')
browser.Document.document.catchEvents(['mousemove'], self)
browser.Document.document.catchEvents(['mouseup'], self)
def _adjustment_value_changed(self, sender):
pass
def _move_cursor(self, event):
pass
def dom_event(self, event, element):
if event.type == 'mousedown':
self.mouseover = True
elif event.type == 'mousemove' and self.mouseover:
self._move_cursor(event)
elif event.type == 'click':
self._move_cursor(event)
elif event.type == 'mouseup':
self.mouseover = False
class HScrollbar(Scrollbar):
def __init__(self, adjustment=None):
Scrollbar.__init__(self, adjustment)
self.down_arrow.setProperty('className', 'scrollbar-left-arrow')
self.up_arrow.setProperty('className', 'scrollbar-right-arrow')
self.line.setPxStyle('height', 15)
self.cursor.setPxStyle('height', 13)
self.line.append(self.cursor)
self.minwidth = 60
self.minheight = 37
def _redraw(self):
Scrollbar._redraw(self)
top = (self.widget_cont.getHeight() - self.line.getHeight()) / 2
self.down_arrow.setPxStyle('top', top)
self.down_arrow.setPxStyle('left', 0)
self.line.setPxStyle('top', top)
self.line.setPxStyle('left', self.down_arrow.getWidth())
self.line.setPxStyle('width', self.widget_cont.getWidth() - 2 -
self.down_arrow.getWidth() -
self.up_arrow.getWidth())
self.up_arrow.setPxStyle('top', top)
self.up_arrow.setPxStyle('left', self.down_arrow.getWidth() +
self.line.getWidth())
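        # Cursor length is proportional to the adjustment's page_size
        # (interpreted as a percentage of the track), with a 30px minimum.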
cursor_size = (self.widget_cont.getWidth() - 2) * \
self.adjustment.page_size / 100.0
cursor_size = max(cursor_size, 30)
self.cursor.setPxStyle('width', cursor_size)
self._adjustment_value_changed(self)
def _move_cursor(self, event):
Scrollbar._move_cursor(self, event)
pos = event.clientX - self.line.getX()
x = pos - self.cursor.getWidth() / 2
x = max(x, 0)
x = min(x, self.line.getWidth() - self.cursor.getWidth() - 2)
value = (x / (self.line.getWidth() - self.cursor.getWidth() - 2)) * \
(self.adjustment.upper - self.adjustment.page_size)
if event.type == 'click':
old_value = self.adjustment.get_value()
if pos < 0:
incr = self.adjustment.step_incr
self.adjustment.set_value(old_value - incr)
elif pos > self.line.getWidth():
incr = self.adjustment.step_incr
self.adjustment.set_value(old_value + incr)
else:
incr = self.adjustment.page_incr
if value > old_value:
self.adjustment.set_value(old_value + incr)
elif value < old_value:
self.adjustment.set_value(old_value - incr)
else:
self.adjustment.set_value(value)
def _adjustment_value_changed(self, sender):
Scrollbar._adjustment_value_changed(self, self)
value = self.adjustment.get_value()
x = (value - self.adjustment.lower) / \
(self.adjustment.upper - self.adjustment.page_size) * \
(self.line.getWidth() - self.cursor.getWidth() - 2)
self.cursor.setPxStyle('left', x)
class VScrollbar(Scrollbar):
def __init__(self, adjustment=None):
Scrollbar.__init__(self, adjustment)
self.down_arrow.setProperty('className', 'scrollbar-down-arrow')
self.up_arrow.setProperty('className', 'scrollbar-up-arrow')
self.line.setPxStyle('width', 15)
self.cursor.setPxStyle('width', 13)
self.line.append(self.cursor)
self.minwidth = 30
self.minheight = 90
def _redraw(self):
Scrollbar._redraw(self)
left = (self.widget_cont.getWidth() - self.line.getWidth()) / 2
self.up_arrow.setPxStyle('left', left)
self.up_arrow.setPxStyle('top', 0)
self.line.setPxStyle('top', self.up_arrow.getHeight())
self.line.setPxStyle('left', left)
self.line.setPxStyle('height', self.widget_cont.getHeight() - 2 -
self.up_arrow.getHeight() -
self.down_arrow.getHeight())
self.down_arrow.setPxStyle('top', self.up_arrow.getHeight() +
self.line.getHeight())
self.down_arrow.setPxStyle('left', left)
cursor_size = (self.widget_cont.getHeight() - 2) * \
self.adjustment.page_size / 100.0
cursor_size = max(cursor_size, 30)
self.cursor.setPxStyle('height', cursor_size)
self._adjustment_value_changed(self)
def _move_cursor(self, event):
Scrollbar._move_cursor(self, event)
pos = event.clientY - self.line.getY()
y = pos - self.cursor.getHeight() / 2
y = max(y, 0)
y = min(y, self.line.getHeight() - self.cursor.getHeight() - 2)
value = (y / (self.line.getHeight() - self.cursor.getHeight() - 2)) * \
(self.adjustment.upper - self.adjustment.page_size)
if event.type == 'click':
old_value = self.adjustment.get_value()
if pos < 0:
incr = self.adjustment.step_incr
self.adjustment.set_value(old_value - incr)
elif pos > self.line.getHeight():
incr = self.adjustment.step_incr
self.adjustment.set_value(old_value + incr)
else:
incr = self.adjustment.page_incr
if value > old_value:
self.adjustment.set_value(old_value + incr)
elif value < old_value:
self.adjustment.set_value(old_value - incr)
else:
self.adjustment.set_value(value)
def _adjustment_value_changed(self, sender):
        Scrollbar._adjustment_value_changed(self, self)
value = self.adjustment.get_value()
y = (value - self.adjustment.lower) / \
(self.adjustment.upper - self.adjustment.page_size) * \
(self.line.getHeight() - self.cursor.getHeight() - 2)
self.cursor.setPxStyle('top', y)
class OptionMenu(Button):
def __init__(self):
Button.__init__(self)
self.ico = browser.Element('img')
self.ico.setProperty('src', 'arr.png')
self.ico.setStyle('position','absolute')
self.ico.setPxStyle('right', 2)
self.widget_int.append(self.ico)
self.connect('clicked', self._clicked, None)
self.menu = None
self.menu_open = False
self.label = Label('')
self.label.set_alignment(0, 0.5)
self.add(self.label)
def _redraw(self):
Button._redraw(self)
rect = self.get_allocation()
pad = rect.height / 2 - self.ico.getHeight() / 2
self.ico.setPxStyle('top', pad)
def _clicked(self, elem, data=None):
self.menu_open = not self.menu_open
if not self.menu._visible:
rect = self.get_allocation()
self.menu.widget_cont.setPxStyle('left', rect.x)
self.menu.widget_cont.setPxStyle('top', rect.y + rect.height)
self.menu.show_all()
else:
self.menu.hide_all()
def _selected(self, elem, data=None):
act = self.menu.get_active()
if act is not None:
self.label.set_text(act.label_cont)
def set_menu(self, menu):
self.label.set_text(menu.items[0].label_cont)
self.menu = menu
self.menu.connect('selection-done', self._selected, None)
class MenuShell(Container):
def __init__(self):
Container.__init__(self)
self.items = []
container = self.widget_cont
container.setStyle('border','1px solid gray')
container.setStyle('position', 'absolute')
container.setStyle('width', 'auto')
container.setStyle('height', '')
container.setStyle('left', '')
container.setStyle('right', '')
container.setStyle('bottom', '')
container.setStyle('top', '')
container.setStyle('zIndex', '100')
self.widget_int = self.widget_cont
self.widget_int.setStyle('position', 'absolute')
container.setProperty('className', 'menushell')
self.hide()
browser.Document.append(self.widget_cont)
def append(self, child):
child.hide()
child.connect('select', self._selected, None)
self.items.append(child)
self.add(child)
def _selected(self, elem, data=None):
self.emit('selection-done')
self.hide_all()
def _redraw(self):
Container._redraw(self)
for child in self.children:
self.minwidth = max(self.minwidth, child.minwidth)
self.minheight += child.minheight
self.widget_cont.setPxStyle('minHeight', self.minheight)
self.widget_cont.setPxStyle('minWidth', self.minwidth)
class Menu(MenuShell):
def append(self, child):
child.connect('select', self._catch_active, None)
MenuShell.append(self, child)
self._active = None
def _catch_active(self, elem, data=None):
self._active = elem
def get_active(self):
return self._active
class Item(Bin):
def __init__(self, name):
Bin.__init__(self)
self.label_cont = name
container = self.widget_cont
container.catchEvents(['click'], self)
container.setStyle('position', '')
container.setStyle('width', '100%')
container.setStyle('bottom', '')
container.setStyle('top', '')
self.widget_int.setStyle('position', 'absolute')
self.widget_int = self.widget_cont
container.setProperty('className', 'menuitem')
self.content = Label(name)
self.content.hide()
self.add(self.content)
def dom_event(self, event, element):
if event.type == 'click':
self.emit('select')
self.emit('toggle')
def show(self):
Bin.show(self)
self.content.show()
def hide(self):
Bin.hide(self)
self.content.hide()
def _redraw(self):
Bin._redraw(self)
self.widget_cont.setPercentStyle('width', 100)
class MenuItem(Item):
def dom_event(self, event, element):
Item.dom_event(self, event, element)
if event.type == 'click':
self.emit('activate')
gtkbuildermap = {
'GtkWindow': Window,
'GtkTable': Table,
'GtkLabel': Label,
'GtkVBox': VBox,
'GtkHBox': HBox,
'GtkEntry': Entry
}
def find_props(node):
res = {}
if not node:
return {}
props = node.getElementsByTagName("property")
for i in range(props.length):
        n = props.item(i)
name = n.attributes.getNamedItem('name').nodeValue
log.write("find_props ")
log.write(name)
log.write(" ")
log.write(n.textContent)
log.writebr("")
res[name] = n.textContent
return res
class BuilderETree:
def __init__(self):
self.objects = []
def create_object_from_xml_node(self, node):
klsname = node.attrib['class']
id = node.attrib['id']
obj = gtkmap[klsname]()
for prop in node.findall("property"):
name = prop.attrib['name']
value = prop.textContent
try:
setattr(obj.props, name, value)
# XXX except without a specified exception is bad style
# most of the time, perhaps use an AttributeError here?
except:
if value.isdigit():
setattr(obj.props, name, int(value))
else:
print "setattr failed", klsname, name, value
for childnode in node.findall("child"):
childobj = childnode.find("object")
if childobj is None:
continue
child = self.create_object_from_xml_node(childobj)
obj.add_child(Builder(), child, klsname)
props = find_props(childnode.find("packing"))
for prop, value in props.items():
if value.isdigit():
value = int(value)
obj.child_set_property(child, prop, value)
print props
return obj
def add_from_file(self, fname):
s = open(fname).read()
        return self.add_from_string(s)
    def add_from_string(self, s):
        # note: lxml isn't importable under pyjamas, so this path only works natively
        doc = lxml.etree.fromstring(s)
for x in doc:
if x.tag != 'object':
continue
obj = self.create_object_from_xml_node(x)
self.objects.append(obj)
def get_objects(self):
return self.objects
gtkmap = {'GtkWindow': Window,
'GtkTable': Table,
'GtkLabel': Label,
'GtkVBox': VBox,
'GtkHBox': HBox,
'GtkEntry': Entry
}
class Builder:
def __init__(self):
self.objects = []
def create_object_from_xml_node(self, node):
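        # Instantiate the widget class mapped from the GTK class name, apply its
        # <property> values, then recursively build and attach <child> objects
        # together with their <packing> properties.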
klsname = node.attributes.getNamedItem('class').nodeValue
# XXX shadowing builtin id
id = node.attributes.getNamedItem('id').nodeValue
log.writebr("%s %s" % (klsname, id))
obj = gtkmap[klsname]()
props = node.getElementsByTagName("property")
log.writebr("%s %d" % (klsname, props.length))
for i in range(props.length):
prop = props.item(i)
name = prop.attributes.getNamedItem('name').nodeValue
value = prop.textContent
try:
setattr(obj, name, value)
# XXX missing exception class, should probably be AttributeError
except:
if value and value.isdigit():
setattr(obj, name, int(value))
else:
print "setattr failed", klsname, name, value
childnodes = node.getElementsByTagName("child")
log.writebr("%s children %d" % (klsname, childnodes.length))
for i in range(childnodes.length):
childnode = childnodes.item(i)
childobj = childnode.getElementsByTagName("object")
if childobj is None:
continue
if childobj.length == 0:
continue
childobj = childobj.item(0)
child = self.create_object_from_xml_node(childobj)
#obj.add_child(gtk.Builder(), child, klsname)
obj.add(child)
packing = childnode.getElementsByTagName("packing")
if packing is None:
continue
if packing.length == 0:
continue
packing = packing.item(0)
props = find_props(packing)
for prop, value in props.items():
if value.isdigit():
value = int(value)
obj.child_set_property(child, prop, value)
return obj
def add_from_file(self, fname):
fobj = open(fname)
try:
s = fobj.read()
finally:
fobj.close()
        return self.add_from_string(s)
def add_from_string(self, xmldoc):
x = xmldoc.firstChild.firstChild
while x:
log.writebr(x.nodeName)
if x.nodeName == 'object':
log.writebr("creating object")
obj = self.create_object_from_xml_node(x)
self.objects.append(obj)
x = x.nextSibling
def get_objects(self):
return self.objects
def main():
pass
def main_quit():
#TODO: In popups, close it !
browser.Document.setContent('This application has finalized !')
| |
"""
Compliance Checker suite runner
"""
import codecs
import inspect
import itertools
import os
import re
import subprocess
import sys
import textwrap
import warnings
from collections import defaultdict
from datetime import datetime, timezone
from distutils.version import StrictVersion
from operator import itemgetter
from urllib.parse import urlparse
import requests
from lxml import etree as ET
from netCDF4 import Dataset
from owslib.sos import SensorObservationService
from owslib.swe.sensor.sml import SensorML
from pkg_resources import working_set
from compliance_checker import MemoizedDataset, __version__, tempnc
from compliance_checker.base import BaseCheck, GenericFile, Result, fix_return_value
from compliance_checker.cf.cf import CFBaseCheck
from compliance_checker.protocols import cdl, erddap, netcdf, opendap
# Ensure output is encoded as Unicode when checker output is redirected or piped
if sys.stdout.encoding is None:
sys.stdout = codecs.getwriter("utf8")(sys.stdout)
if sys.stderr.encoding is None:
sys.stderr = codecs.getwriter("utf8")(sys.stderr)
def extract_docstring_summary(docstring):
"""
Returns a dedented docstring without parameter information
:param docstring: A docstring
:type docstring: str
:returns: str
"""
# return a dedented, then indented two spaces docstring with leading and
# trailing whitespace removed.
return re.sub(
r"^(?=.)",
" ",
textwrap.dedent(
re.split(r"\n\s*:\w", docstring, flags=re.MULTILINE)[0]
).strip(),
flags=re.MULTILINE,
)
class CheckSuite(object):
checkers = (
{}
) # Base dict of checker names to BaseCheck derived types, override this in your CheckSuite implementation
templates_root = "compliance_checker" # modify to load alternative Jinja2 templates
def __init__(self, options=None):
self.col_width = 40
self.options = options or {}
@classmethod
def _get_generator_plugins(cls):
"""
Return a list of classes from external plugins that are used to
generate checker classes
"""
if not hasattr(cls, "suite_generators"):
gens = working_set.iter_entry_points("compliance_checker.generators")
cls.suite_generators = [x.resolve() for x in gens]
return cls.suite_generators
def _print_suites(self, verbose=0):
"""
        Prints out available check suites. If the verbose argument is greater
        than zero, includes the internal module version number of each checker
        and also displays the "latest" meta-versions.
:param verbose: Integer indicating whether to print verbose output
:type verbose: int
"""
for checker in sorted(self.checkers.keys()):
version = getattr(self.checkers[checker], "_cc_checker_version", "???")
if verbose > 0:
print(" - {} (v{})".format(checker, version))
elif ":" in checker and not checker.endswith(
":latest"
): # Skip the "latest" output
print(" - {}".format(checker))
def _print_checker(self, checker_obj):
"""
Prints each available check and a description with an abridged
docstring for a given checker object
:param checker_obj: Checker object on which to operate
:type checker_obj: subclass of compliance_checker.base.BaseChecker
"""
check_functions = self._get_checks(checker_obj, defaultdict(lambda: None))
for c, _ in check_functions:
print("- {}".format(c.__name__))
if c.__doc__ is not None:
u_doc = c.__doc__
print("\n{}\n".format(extract_docstring_summary(u_doc)))
@classmethod
def add_plugin_args(cls, parser):
"""
Add command line arguments for external plugins that generate checker
classes
"""
for gen in cls._get_generator_plugins():
gen.add_arguments(parser)
@classmethod
def load_generated_checkers(cls, args):
"""
Load checker classes from generator plugins
"""
for gen in cls._get_generator_plugins():
checkers = gen.get_checkers(args)
cls.checkers.update(checkers)
@classmethod
def load_all_available_checkers(cls):
"""
Helper method to retrieve all sub checker classes derived from various
base classes.
"""
cls._load_checkers(working_set.iter_entry_points("compliance_checker.suites"))
@classmethod
def _load_checkers(cls, checkers):
"""
Loads up checkers in an iterable into the class checkers dict
:param checkers: An iterable containing the checker objects
"""
for c in checkers:
try:
check_obj = c.resolve()
if hasattr(check_obj, "_cc_spec") and hasattr(
check_obj, "_cc_spec_version"
):
check_version_str = ":".join(
(check_obj._cc_spec, check_obj._cc_spec_version)
)
cls.checkers[check_version_str] = check_obj
# TODO: remove this once all checkers move over to the new
# _cc_spec, _cc_spec_version
else:
# if _cc_spec and _cc_spec_version attributes aren't
# present, fall back to using name attribute
checker_name = getattr(check_obj, "name", None) or getattr(
check_obj, "_cc_spec", None
)
warnings.warn(
"Checker for {} should implement both "
'"_cc_spec" and "_cc_spec_version" '
'attributes. "name" attribute is deprecated. '
"Assuming checker is latest version.",
DeprecationWarning,
)
# append "unknown" to version string since no versioning
# info was provided
cls.checkers["{}:unknown".format(checker_name)] = check_obj
except Exception as e:
print("Could not load", c, ":", e, file=sys.stderr)
# find the latest version of versioned checkers and set that as the
# default checker for compliance checker if no version is specified
ver_checkers = sorted([c.split(":", 1) for c in cls.checkers if ":" in c])
for spec, versions in itertools.groupby(ver_checkers, itemgetter(0)):
version_nums = [v[-1] for v in versions]
try:
latest_version = str(max(StrictVersion(v) for v in version_nums))
# if the version can't be parsed as a StrictVersion, parse
# according to character collation
except ValueError:
latest_version = max(version_nums)
cls.checkers[spec] = cls.checkers[spec + ":latest"] = cls.checkers[
":".join((spec, latest_version))
]
def _get_checks(self, checkclass, skip_checks):
"""
Helper method to retrieve check methods from a Checker class. Excludes
any checks in `skip_checks`.
The name of the methods in the Checker class should start with "check_"
for this method to find them.
"""
meths = inspect.getmembers(checkclass, inspect.isroutine)
# return all check methods not among the skipped checks
returned_checks = []
for fn_name, fn_obj in meths:
if fn_name.startswith("check_") and skip_checks[fn_name] != BaseCheck.HIGH:
returned_checks.append((fn_obj, skip_checks[fn_name]))
return returned_checks
def _run_check(self, check_method, ds, max_level):
"""
Runs a check and appends a result to the values list.
@param bound method check_method: a given check method
@param netCDF4 dataset ds
@param int max_level: check level
@return list: list of Result objects
"""
val = check_method(ds)
if hasattr(val, "__iter__"):
# Handle OrderedDict when we need to modify results in a superclass
# i.e. some checks in CF 1.7 which extend CF 1.6 behaviors
if isinstance(val, dict):
val_iter = val.values()
else:
val_iter = val
check_val = []
for v in val_iter:
res = fix_return_value(
v,
check_method.__func__.__name__,
check_method,
check_method.__self__,
)
if max_level is None or res.weight > max_level:
check_val.append(res)
return check_val
else:
check_val = fix_return_value(
val, check_method.__func__.__name__, check_method, check_method.__self__
)
if max_level is None or check_val.weight > max_level:
return [check_val]
else:
return []
def _get_check_versioned_name(self, check_name):
"""
The compliance checker allows the user to specify a
check without a version number but we want the report
to specify the version number.
Returns the check name with the version number it checked
"""
if ":" not in check_name or ":latest" in check_name:
check_name = ":".join(
(check_name.split(":")[0], self.checkers[check_name]._cc_spec_version)
)
return check_name
def _get_check_url(self, check_name):
"""
Return the check's reference URL if it exists. If not, return empty str.
@param check_name str: name of the check being run returned by
_get_check_versioned_name()
"""
return getattr(self.checkers[check_name], "_cc_url", "")
def _get_valid_checkers(self, ds, checker_names):
"""
Returns a filtered list of 2-tuples: (name, valid checker) based on the ds object's type and
the user selected names.
"""
assert len(self.checkers) > 0, "No checkers could be found."
if len(checker_names) == 0:
checker_names = list(self.checkers.keys())
args = [
(name, self.checkers[name])
for name in checker_names
if name in self.checkers
]
valid = []
all_checked = set(a[1] for a in args) # only class types
checker_queue = set(args)
while len(checker_queue):
name, a = checker_queue.pop()
# is the current dataset type in the supported filetypes
# for the checker class?
if type(ds) in a().supported_ds:
valid.append((name, a))
# add subclasses of SOS checks
if "ioos_sos" in name:
for subc in a.__subclasses__():
if subc not in all_checked:
all_checked.add(subc)
checker_queue.add((name, subc))
return valid
@classmethod
def _process_skip_checks(cls, skip_checks):
"""
Processes an iterable of skip_checks with strings and returns a dict
with <check_name>: <max_skip_level> pairs
"""
check_dict = defaultdict(lambda: None)
# A is for "all", "M" is for medium, "L" is for low
check_lookup = {"A": BaseCheck.HIGH, "M": BaseCheck.MEDIUM, "L": BaseCheck.LOW}
for skip_check_spec in skip_checks:
split_check_spec = skip_check_spec.split(":")
check_name = split_check_spec[0]
if len(split_check_spec) < 2:
check_max_level = BaseCheck.HIGH
else:
try:
check_max_level = check_lookup[split_check_spec[1]]
except KeyError:
warnings.warn(
"Skip specifier '{}' on check '{}' not found,"
" defaulting to skip entire check".format(
split_check_spec[1], check_name
)
)
check_max_level = BaseCheck.HIGH
check_dict[check_name] = check_max_level
return check_dict
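# A hedged sketch of the resulting mapping (editor's addition; the check
# names are hypothetical):
#   CheckSuite._process_skip_checks(["check_lat:L", "check_time:A", "check_flags"])
#   # -> {"check_lat": BaseCheck.LOW,     # drop only low-priority results
#   #     "check_time": BaseCheck.HIGH,   # skip the check entirely
#   #     "check_flags": BaseCheck.HIGH}  # no level given, skip entirely
#   # Unlisted check names default to None via the defaultdict.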
def run(self, ds, skip_checks, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if skip_checks is not None:
skip_check_dict = CheckSuite._process_skip_checks(skip_checks)
else:
skip_check_dict = defaultdict(lambda: None)
if len(checkers) == 0:
print(
"No valid checkers found for tests '{}'".format(",".join(checker_names))
)
for checker_name, checker_class in checkers:
# TODO: maybe make this a little more reliable than depending on
# a string to determine the type of the checker -- perhaps
# use some kind of checker object with checker type and
# version baked in
checker_type_name = checker_name.split(":")[0]
checker_opts = self.options.get(checker_type_name, set())
# instantiate a Checker object
try:
checker = checker_class(options=checker_opts)
# hacky fix for no options in constructor
except TypeError:
checker = checker_class()
# TODO? : Why is setup(ds) called at all instead of just moving the
# checker setup into the constructor?
# setup method to prep
checker.setup(ds)
checks = self._get_checks(checker, skip_check_dict)
vals = []
errs = {} # check method name -> (exc, traceback)
for c, max_level in checks:
try:
vals.extend(self._run_check(c, ds, max_level))
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2])
# score the results we got back
groups = self.scores(vals)
# invoke finalizer explicitly
del checker
ret_val[checker_name] = groups, errs
return ret_val
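# A minimal usage sketch (editor's addition; the checker name and dataset
# path are hypothetical, and it assumes the suite's checker plugins have
# already been loaded, e.g. via load_all_available_checkers()):
#   suite = CheckSuite()
#   ds = suite.load_dataset("/tmp/example.nc")
#   results = suite.run(ds, [], "cf:1.6")
#   groups, errors = results["cf:1.6"]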
@classmethod
def passtree(cls, groups, limit):
for r in groups:
if r.children:
x = cls.passtree(r.children, limit)
if r.weight >= limit and x is False:
return False
if r.weight >= limit and r.value[0] != r.value[1]:
return False
return True
def build_structure(self, check_name, groups, source_name, limit=1):
"""
Compiles the checks, results and scores into an aggregate structure which looks like:
{
"scored_points": 396,
"low_count": 0,
"possible_points": 400,
"testname": "gliderdac",
"medium_count": 2,
"source_name": ".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc",
"high_count": 0,
"all_priorities" : [...],
"high_priorities": [...],
"medium_priorities" : [...],
"low_priorities" : [...]
}
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
"""
aggregates = {}
aggregates["scored_points"] = 0
aggregates["possible_points"] = 0
high_priorities = []
medium_priorities = []
low_priorities = []
all_priorities = []
aggregates["high_count"] = 0
aggregates["medium_count"] = 0
aggregates["low_count"] = 0
def named_function(result):
for child in result.children:
all_priorities.append(child)
named_function(child)
# For each result, bin them into the appropriate category, put them all
# into the all_priorities category and add up the point values
for res in groups:
if res.weight < limit:
continue
# If the result has 0 possible points, then it was not valid for
# this dataset and contains no meaningful information
if res.value[1] == 0:
continue
aggregates["scored_points"] += res.value[0]
aggregates["possible_points"] += res.value[1]
if res.weight == 3:
high_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates["high_count"] += 1
elif res.weight == 2:
medium_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates["medium_count"] += 1
else:
low_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates["low_count"] += 1
all_priorities.append(res)
# Some results have children
# We don't render children inline with the top three tables, but we
# do total the points and display the messages
named_function(res)
aggregates["high_priorities"] = high_priorities
aggregates["medium_priorities"] = medium_priorities
aggregates["low_priorities"] = low_priorities
aggregates["all_priorities"] = all_priorities
aggregates["testname"] = self._get_check_versioned_name(check_name)
aggregates["source_name"] = source_name
aggregates["scoreheader"] = self.checkers[check_name]._cc_display_headers
aggregates["cc_spec_version"] = self.checkers[check_name]._cc_spec_version
aggregates["cc_url"] = self._get_check_url(aggregates["testname"])
aggregates["report_timestamp"] = datetime.now(timezone.utc).strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
aggregates["cc_version"] = __version__
return aggregates
def dict_output(self, check_name, groups, source_name, limit):
"""
Builds the results into a JSON-serializable structure and returns it.
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
"""
aggregates = self.build_structure(check_name, groups, source_name, limit)
return self.serialize(aggregates)
def serialize(self, o):
"""
Returns a safe serializable object that can be serialized into JSON.
@param o Python object to serialize
"""
if isinstance(o, (list, tuple)):
return [self.serialize(i) for i in o]
if isinstance(o, dict):
return {k: self.serialize(v) for k, v in o.items()}
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, Result):
return self.serialize(o.serialize())
return o
def checker_html_output(self, check_name, groups, source_name, limit):
"""
Renders the HTML output for a single test using Jinja2 and returns it
as a string.
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
"""
from jinja2 import Environment, PackageLoader
self.j2 = Environment(
loader=PackageLoader(self.templates_root, "data/templates")
)
template = self.j2.get_template("ccheck.html.j2")
template_vars = self.build_structure(check_name, groups, source_name, limit)
return template.render(**template_vars)
def html_output(self, checkers_html):
"""
Renders the HTML output for multiple tests and returns it as a string.
@param checkers_html List of HTML for single tests as returned by
checker_html_output
"""
# Note: This relies on checker_html_output having been called so that
# self.j2 is initialised
template = self.j2.get_template("ccheck_wrapper.html.j2")
return template.render(checkers=checkers_html)
def get_points(self, groups, limit):
score_list = []
score_only_list = []
for g in groups:
if g.weight >= limit:
score_only_list.append(g.value)
# checks where all pertinent sections passed
all_passed = sum(x[0] == x[1] for x in score_only_list)
out_of = len(score_only_list)
# sorts lists into high/medium/low order
score_list.sort(key=lambda x: x.weight, reverse=True)
return score_list, all_passed, out_of
def standard_output(self, ds, limit, check_name, groups):
"""
Generates the Terminal Output for Standard cases
Returns the groups, scored sections, and possible sections needed by the later output routines.
"""
score_list, points, out_of = self.get_points(groups, limit)
issue_count = out_of - points
# Let's add the version number to the check name if it's missing
check_name = self._get_check_versioned_name(check_name)
check_url = self._get_check_url(check_name)
width = 2 * self.col_width
# NOTE: printing and use of .center()
# Nested .format() calls should be avoided when possible.
# As a future enhancement, a string.Template string might work best here
# but for the time being individual lines are printed and centered with
# .center()
print("\n")
print("-" * width)
print("IOOS Compliance Checker Report".center(width))
print("Version {}".format(__version__).center(width))
print(
"Report generated {}".format(
datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
).center(width)
)
print("{}".format(check_name).center(width))
print("{}".format(check_url).center(width))
print("-" * width)
if issue_count > 0:
print("Corrective Actions".center(width))
plural = "" if issue_count == 1 else "s"
print(
"{} has {} potential issue{}".format(
os.path.basename(ds), issue_count, plural
)
)
return [groups, points, out_of]
def standard_output_generation(self, groups, limit, points, out_of, check):
"""
Generates the Terminal Output
"""
if points < out_of:
self.reasoning_routine(groups, check, priority_flag=limit)
else:
print("All tests passed!")
def reasoning_routine(self, groups, check, priority_flag=3, _top_level=True):
"""
Prints the failing check results, grouped by priority level.
@param list groups: the Result groups
@param str check: checker name
@param int priority_flag: indicates the weight of the groups
@param bool _top_level: indicates the level of the group so as to
print out the appropriate header string
"""
sort_fn = lambda x: x.weight
groups_sorted = sorted(groups, key=sort_fn, reverse=True)
# create dict of the groups -> {level: [reasons]}
result = {
key: [v for v in valuesiter if v.value[0] != v.value[1]]
for key, valuesiter in itertools.groupby(groups_sorted, key=sort_fn)
}
priorities = self.checkers[check]._cc_display_headers
def process_table(res, check):
"""Recursively calls reasoning_routine to parse out child reasons
from the parent reasons.
@param Result res: Result object
@param str check: checker name"""
issue = res.name
if not res.children:
reasons = res.msgs
else:
child_reasons = self.reasoning_routine(
res.children, check, _top_level=False
)
# there shouldn't be messages if there are children
# is this a valid assumption?
reasons = child_reasons
return issue, reasons
# iterate in reverse to the min priority requested;
# the higher the limit, the more lenient the output
proc_strs = ""
for level in range(3, priority_flag - 1, -1):
level_name = priorities.get(level, level)
# print headers
proc_strs = []
# skip any levels that aren't in the result
if level not in result:
continue
# skip any empty result levels
if len(result[level]) > 0:
# only print priority headers at top level, i.e. non-child
# datasets
if _top_level:
width = 2 * self.col_width
print("\n")
print("{:^{width}}".format(level_name, width=width))
print("-" * width)
data_issues = [process_table(res, check) for res in result[level]]
has_printed = False
for issue, reasons in data_issues:
# if this isn't the first printed issue, add a newline
# separating this and the previous level
if has_printed:
print("")
# join alphabetized reasons together
reason_str = "\n".join(
"* {}".format(r) for r in sorted(reasons, key=lambda x: x[0])
)
proc_str = "{}\n{}".format(issue, reason_str)
print(proc_str)
proc_strs.append(proc_str)
has_printed = True
return "\n".join(proc_strs)
def process_doc(self, doc):
"""
Attempt to parse an xml string conforming to either an SOS or SensorML
dataset and return the results
"""
xml_doc = ET.fromstring(doc)
if xml_doc.tag == "{http://www.opengis.net/sos/1.0}Capabilities":
ds = SensorObservationService(None, xml=doc)
# SensorObservationService does not store the etree doc root,
# so maybe use monkey patching here for now?
ds._root = xml_doc
elif xml_doc.tag == "{http://www.opengis.net/sensorML/1.0.1}SensorML":
ds = SensorML(xml_doc)
else:
raise ValueError("Unrecognized XML root element: {}".format(xml_doc.tag))
return ds
def generate_dataset(self, cdl_path):
"""
Use ncgen to generate a netCDF file from a .cdl file
Returns the path to the generated netcdf file. If ncgen fails, uses
sys.exit(1) to terminate program so a long stack trace is not reported
to the user.
:param str cdl_path: Absolute path to cdl file that is used to generate netCDF file
"""
if (
".cdl" in cdl_path
): # it's possible the filename doesn't have the .cdl extension
ds_str = cdl_path.replace(".cdl", ".nc")
else:
ds_str = cdl_path + ".nc"
# generate netCDF-4 file
iostat = subprocess.run(
["ncgen", "-k", "nc4", "-o", ds_str, cdl_path], stderr=subprocess.PIPE
)
if iostat.returncode != 0:
# if not successful, create netCDF classic file
print(
"netCDF-4 file could not be generated from cdl file with " + "message:"
)
print(iostat.stderr.decode())
print("Trying to create netCDF Classic file instead.")
iostat = subprocess.run(
["ncgen", "-k", "nc3", "-o", ds_str, cdl_path], stderr=subprocess.PIPE
)
if iostat.returncode != 0:
# Exit program if neither a netCDF Classic nor a netCDF-4 file
# could be created.
print(
"netCDF Classic file could not be generated from cdl file"
+ "with message:"
)
print(iostat.stderr.decode())
sys.exit(1)
return ds_str
def load_dataset(self, ds_str):
"""
Returns an instantiated instance of either a netCDF file or an SOS
mapped DS object.
:param str ds_str: URL or local path of the resource to load
"""
# If it's a remote URL load it as a remote resource, otherwise treat it
# as a local resource.
pr = urlparse(ds_str)
if pr.netloc:
return self.load_remote_dataset(ds_str)
else:
return self.load_local_dataset(ds_str)
def check_remote_netcdf(self, ds_str):
if netcdf.is_remote_netcdf(ds_str):
response = requests.get(ds_str, allow_redirects=True, timeout=60)
try:
return MemoizedDataset(
urlparse(response.url).path, memory=response.content
)
except OSError as e:
# handle case when netCDF C libs weren't compiled with
# in-memory support by using tempfile
with tempnc(response.content) as _nc:
return MemoizedDataset(_nc)
def load_remote_dataset(self, ds_str):
"""
Returns a dataset instance for the remote resource, either OPeNDAP or SOS
:param str ds_str: URL to the remote resource
"""
url_parsed = urlparse(ds_str)
# ERDDAP TableDAP request
nc_remote_result = self.check_remote_netcdf(ds_str)
if nc_remote_result:
return nc_remote_result
# if application/x-netcdf wasn't detected in the Content-Type headers
# and this is some kind of erddap tabledap form, then try to get the
# .ncCF file from ERDDAP
elif "tabledap" in ds_str and not url_parsed.query:
# modify ds_str to contain the full variable request
variables_str = opendap.create_DAP_variable_str(ds_str)
# join to create a URL to an .ncCF resource
ds_str = "{}.ncCF?{}".format(ds_str, variables_str)
nc_remote_result = self.check_remote_netcdf(ds_str)
if nc_remote_result:
return nc_remote_result
# if it's just an OPeNDAP endpoint, use that
elif opendap.is_opendap(ds_str):
return Dataset(ds_str)
# Check if the HTTP response is XML, if it is, it's likely SOS so
# we'll attempt to parse the response as SOS.
# Some SOS servers don't seem to support HEAD requests.
# Issue GET instead if we reach here and can't get the response
response = requests.get(ds_str, allow_redirects=True, timeout=60)
content_type = response.headers.get("content-type")
if content_type.split(";")[0] == "text/xml":
return self.process_doc(response.content)
else:
raise ValueError(
"Unknown service with content-type: {}".format(content_type)
)
def load_local_dataset(self, ds_str):
"""
Returns a dataset instance for the local resource
:param ds_str: Path to the resource
"""
if cdl.is_cdl(ds_str):
ds_str = self.generate_dataset(ds_str)
if netcdf.is_netcdf(ds_str):
return MemoizedDataset(ds_str)
# Assume this is just a Generic File if it exists
if os.path.isfile(ds_str):
return GenericFile(ds_str)
raise ValueError("File is an unknown format")
def scores(self, raw_scores):
"""
Transforms raw scores from a single checker into a fully tallied and grouped scoreline.
"""
grouped = self._group_raw(raw_scores)
return grouped
def _group_raw(self, raw_scores, cur=None, level=1):
"""
Internal recursive method to group raw scores into a cascading score summary.
Only top level items are tallied for scores.
@param list raw_scores: list of raw scores (Result objects)
"""
def trim_groups(r):
if isinstance(r.name, tuple) or isinstance(r.name, list):
new_name = r.name[1:]
else:
new_name = []
return Result(r.weight, r.value, new_name, r.msgs)
# CHECK FOR TERMINAL CONDITION: all raw_scores have empty (zero-length) names
# @TODO could have a problem here with scalar name, but probably still works
terminal = [len(x.name) for x in raw_scores]
if terminal == [0] * len(raw_scores):
return []
def group_func(r):
"""
Takes a Result object and slices off the first element of its name
if it's a tuple. Otherwise, does nothing to the name. Returns the
Result's name and weight in a tuple to be used for sorting in that
order in a groupby function.
@param Result r
@return tuple (str, int)
"""
if isinstance(r.name, tuple) or isinstance(r.name, list):
if len(r.name) == 0:
retval = ""
else:
retval = r.name[0:1][0]
else:
retval = r.name
return retval, r.weight
# END INTERNAL FUNCS ##########################################
# NOTE until this point, *ALL* Results in raw_scores are
# individual Result objects.
# sort then group by name, then by priority weighting
grouped = itertools.groupby(sorted(raw_scores, key=group_func), key=group_func)
# NOTE: post-grouping, grouped looks something like
# [(('Global Attributes', 1), <itertools._grouper at 0x7f10982b5390>),
# (('Global Attributes', 3), <itertools._grouper at 0x7f10982b5438>),
# (('Not a Global Attr', 1), <itertools._grouper at 0x7f10982b5470>),
# (('Some Variable', 2), <itertools._grouper at 0x7f10982b5400>)]
ret_val = []
for k, v in grouped: # iterate through the grouped tuples
k = k[0] # slice ("name", weight_val) --> "name"
v = list(v) # from itertools._grouper to list
cv = self._group_raw(list(map(trim_groups, v)), k, level + 1)
if len(cv):
# if this node has children, max weight of children + sum of all the scores
max_weight = max([x.weight for x in cv])
sum_scores = tuple(map(sum, list(zip(*([x.value for x in cv])))))
msgs = []
else:
max_weight = max([x.weight for x in v])
sum_scores = tuple(
map(sum, list(zip(*([self._translate_value(x.value) for x in v]))))
)
msgs = sum([x.msgs for x in v], [])
ret_val.append(
Result(
name=k, weight=max_weight, value=sum_scores, children=cv, msgs=msgs
)
)
return ret_val
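# A hedged sketch of the grouping behavior (editor's addition; the Result
# names and values are made up, and Result is assumed to take positional
# arguments (weight, value, name) as used by trim_groups above):
#   raw = [Result(BaseCheck.HIGH, True, ("Global Attributes", "title")),
#          Result(BaseCheck.HIGH, (1, 2), ("Global Attributes", "summary")),
#          Result(BaseCheck.LOW, False, "standalone_check")]
#   grouped = suite._group_raw(raw)
#   # -> a "Global Attributes" Result with value (2, 3) and two children,
#   #    plus a "standalone_check" Result with value (0, 1).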
def _translate_value(self, val):
"""
Turns shorthand True/False/None checks into full scores (1, 1)/(0, 1)/(0, 0).
Leaves full scores alone.
"""
if val is True:
return (1, 1)
elif val is False:
return (0, 1)
elif val is None:
return (0, 0)
return val
| |
"""
**Parameters:**
| markdown_directory (*string*) -- Generally used for development purposes only.
| project_directory (*string*) -- Path to your project directory
| css_directory (*string*) -- Path to your projects CSS directory
| docs_directory (*string*) -- Path to Sphinx docs.
| file_types (*tuple of strings*) -- All file types/extensions to search for in the defined project_directory
that contain encoded class selectors.
| timing_enabled (*bool*) -- Run performance timer
| markdown_docs (*bool*) -- Generate a markdown file that provides a quick syntax and clashing alias reference.
| html_docs (*bool*) -- Generate an html file that provides a quick syntax and clashing alias reference.
| rst_docs (*bool*) -- Generate a sphinx rst file that provides a quick syntax and clashing alias reference.
| human_readable (*bool*) -- Generate a standard human readable css file.
| minify (*bool*) -- Generate a minified version of the css file.
| media_queries_enabled (*bool*) -- Generate breakpoint and scaling media queries.
| use_em (*bool*) -- A ``pixels`` to ``em`` unit conversion flag. True enables unit conversion.
False disables unit conversions meaning any pixel value remains unchanged.
| base (*int*) -- Base used for unit conversion (typically set to 16). The pixel value will be divided by
``base`` during unit conversion.
| xxsmall (*tuple of floats*) -- (0px, upper limit in pixels)
| xsmall (*tuple of floats*) -- (xxsmall upper limit + 1px, upper limit in pixels)
| small (*tuple of floats*) -- (xsmall upper limit + 1px, upper limit in pixels)
| medium (*tuple of floats*) -- (small upper limit + 1px, upper limit in pixels)
| large (*tuple of floats*) -- (medium upper limit + 1px, upper limit in pixels)
| xlarge (*tuple of floats*) -- (large upper limit + 1px, upper limit in pixels)
| xxlarge (*tuple of floats*) -- (xlarge upper limit + 1px, upper limit in pixels)
| giant (*tuple of floats*) -- (xxlarge upper limit + 1px, upper limit in pixels)
| xgiant (*tuple of floats*) -- (giant upper limit + 1px, upper limit in pixels)
| xxgiant (*tuple of floats*) -- (xgiant upper limit + 1px, 1E+6) [Technically the upper limit is infinity,
but CSS does not permit it.]
**cssutils Patch:**
``cssutils`` does not currently support CSS 3 Units. The patch in this file allows length units of
``q``, ``ch``, ``rem``, ``vw``, ``vh``, ``vmin``, and ``vmax``. It also allows angle units of ``turn``.
"""
# python 2
from __future__ import absolute_import, division, unicode_literals
from builtins import round
# builtins
from os import getcwd, path
from string import digits
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
# plugins
from cssutils import profile
__project__ = 'blowdrycss'
# Set project_directory to the one containing the files you want to DRY out.
# Change it to whatever you want.
cwd = getcwd()
if cwd.endswith('unit_tests'): # Allows running of pycharm unittest.
markdown_directory = path.join(cwd, 'test_markdown')
project_directory = path.join(cwd, 'test_examplesite')
css_directory = path.join(project_directory, 'test_css')
docs_directory = path.join(cwd, 'test_docs')
else: # Run unittest cmd from the root directory.
markdown_directory = path.join(cwd, 'blowdrycss', 'unit_tests', 'test_markdown')
project_directory = path.join(cwd, 'blowdrycss', 'unit_tests', 'test_examplesite')
css_directory = path.join(project_directory, 'test_css')
docs_directory = path.join(cwd, 'blowdrycss', 'unit_tests', 'test_docs')
# Logging
logging_enabled = False
logging_level = DEBUG # Allowed: DEBUG, INFO, WARNING, ERROR, CRITICAL
log_to_console = True
log_to_file = True
log_directory = path.join(cwd, 'log')
log_file_name = 'blowdrycss.log'
one_mega_byte = 1048576
log_file_size = 4 * one_mega_byte # Max log file size
log_backup_count = 1 # Maximum number of log files.
# All file types/extensions to search for in the defined project_directory that contain encoded class selectors.
# Example format: ('*.html', )
file_types = ('*.html', )
# Boolean Flags
timing_enabled = True # Run performance timer
markdown_docs = True # Generate a markdown file that provides a quick syntax and clashing alias reference.
html_docs = True # Generate an html file that provides a quick syntax and clashing alias reference.
rst_docs = True # Generate a sphinx rst file that provides a quick syntax and clashing alias reference.
human_readable = True # Generate a standard human readable css file.
minify = True # Generate a minified version of the css file.
media_queries_enabled = True # Generate breakpoint and scaling media queries.
# ...Not Implemented Yet...
# use_rgb = True
# extra_dry = False # Combine identical CSS discovered under different class selector names.
# TODO: Implement these in a fashion similar to the performance timer.
# auto_generate = False # Automatically generates blowdry.css file when a project HTML file is saved.
# http_server = False # Auto-Start a simple webserver on localhost:8080.
# public_url = False # Uses ngrok to generate a temporary public url for testings and demo purposes.
# condense_classes = False # Edits HTML Files after discovering common patterns (Not DRY do not implement).
# Unit Conversion Defaults
use_em = True
base = 16
def px_to_em(pixels):
""" Convert a numeric value from px to em using ``settings.base`` as the unit conversion factor.
**Rules:**
- ``pixels`` shall only contain [0-9.-].
- Inputs that contain any other value are simply passed through unchanged.
- Default ``base`` is 16 meaning ``16px = 1em``
**Note:** Does not check the ``property_name`` or ``use_em`` values. Rather, it blindly converts
whatever input is provided. The calling method is expected to know what it is doing.
Rounds float to a maximum of 4 decimal places.
:type pixels: str, int, float
:param pixels: A numeric value with the units stripped.
:return: (str)
- If the input is convertible return the converted number as a string with the units ``em``
appended to the end.
- If the input is not convertible return the unprocessed input.
>>> from settings.blowdrycss_settings import px_to_em
>>> # settings.use_em = True
>>> px_to_em(pixels='-16.0')
-1.0em
>>> # settings.use_em = False
>>> px_to_em(pixels='42px')
42px
>>> # Invalid input passes through.
>>> px_to_em(pixels='invalid')
invalid
"""
if set(str(pixels)) <= set(digits + '-.'):
em = float(pixels) / float(base)
em = round(em, 4)
em = str(em) + 'em' # Add 'em'.
return em
return pixels
# Default Screen Breakpoints / Transition Triggers
# Tuple Format (Lower Limit, Upper Limit) in pixels.
# Note: These values change if unit conversion is enabled i.e. ``use_em`` is ``True``.
# Common Screen Resolutions: https://en.wikipedia.org/wiki/List_of_common_resolutions
xxsmall = (px_to_em(0), px_to_em(120)) # 0.0 - 7.5em
xsmall = (px_to_em(121), px_to_em(240)) # 7.5625 - 15.0em
small = (px_to_em(241), px_to_em(480)) # 15.0625 - 30.0em
medium = (px_to_em(481), px_to_em(720)) # 30.0625 - 45.0em # Typical mobile device break point @ 720px.
large = (px_to_em(721), px_to_em(1024)) # 45.0625 - 64.0em
xlarge = (px_to_em(1025), px_to_em(1366)) # 64.0625 - 85.375em
xxlarge = (px_to_em(1367), px_to_em(1920)) # 85.4375 - 120.0em
giant = (px_to_em(1921), px_to_em(2560)) # 120.0625 - 160.0em
xgiant = (px_to_em(2561), px_to_em(2800)) # 160.0625 - 175.0em
xxgiant = (px_to_em(2801), px_to_em(10**6)) # 175.0625 - 62500.0em (stand-in for infinity; CSS has no infinite length).
# Custom CSS Property Syntax
# When adding a new alias it must end with a '-'.
# To add a new alias 'bgc-' for 'background-color' add the (key: value) pair 'background-color': {'bgc-'},
# to custom_property_alias_dict.
#
# key = A valid CSS property name (consult the W3C standard and datalibrary.DataLibrary.property_names).
# value = An alias set().
#
# If 'bgc' is used without the '-', then blowdrycss assumes that 'bgc' is a valid CSS property (which it is not).
# This will result in 'bgc' being discarded later on as an invalid css class selector.
#
# Defining 'bgc-' allows the following encoded class selector syntax:
# 'bgc-blue', 'bgc-h000', ..., 'bgc-red'
#
# These encoded class selectors can be used inside of Web project files matching 'file_types' defined above.
custom_property_alias_dict = {
'background': {'bg-', },
'background-color': {'bgc-', 'bg-c-', 'bg-color-', },
'color': {'c-', },
'font-size': {'fsize-', 'f-size-', },
'font-weight': {'fweight-', 'f-weight-', },
'height': {'h-', },
'margin': {'m-', },
'margin-top': {'m-top-', },
'margin-bottom': {'m-bot-', },
'padding': {'p-', 'pad-', },
'padding-top': {'p-top-', },
'position': {'pos-', },
'text-align': {'talign-', 't-align-', },
'vertical-align': {'valign-', 'v-align-', },
'width': {'w-', },
}
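# A hedged example of extending the alias table (editor's addition; the
# 'border-color' entry and its aliases are only an illustration):
#   custom_property_alias_dict['border-color'] = {'bor-c-', 'border-c-', }
# With that entry in place, encoded class selectors such as 'bor-c-red'
# would be decoded as 'border-color' declarations in matching project files.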
# Patches cssutils
profile._MACROS['length'] = r'0|{num}(em|ex|px|in|cm|mm|pt|pc|q|ch|rem|vw|vh|vmin|vmax)'
profile._MACROS['positivelength'] = r'0|{positivenum}(em|ex|px|in|cm|mm|pt|pc|q|ch|rem|vw|vh|vmin|vmax)'
profile._MACROS['angle'] = r'0|{num}(deg|grad|rad|turn)'
profile._resetProperties()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lanczos algorithms."""
# TODO(rmlarsen): Add implementation of symmetric Lanczos algorithm.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
def lanczos_bidiag(operator,
k,
orthogonalize=True,
starting_vector=None,
name="lanczos_bidiag"):
"""Computes a Lanczos bidiagonalization for a linear operator.
Computes matrices `U` of shape `[m, k+1]`, `V` of shape `[n, k]` and lower
bidiagonal matrix `B` of shape `[k+1, k]`, that satisfy the equations
`A * V = U * B` and `A' * U[:, :-1] = V * B[:-1, :]'`.
The columns of `U` are orthonormal and form a basis for the Krylov subspace
`K(A*A', U[:,0])`.
The columns of `V` are orthonormal and form a basis for the Krylov subspace
`K(A'*A, A' U[:,0])`.
Args:
operator: An object representing a linear operator with attributes:
- shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension of the co-domain of the operator,
`shape[1]` is the dimension of the domain of the operator. In other
words, if operator represents an M x N matrix A, `shape` must contain
`[M, N]`.
- dtype: The datatype of input to and output from `apply` and
`apply_adjoint`.
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- apply_adjoint: Callable object taking a vector `x` as input and
returning a vector with the result of applying the adjoint operator
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
k: An integer or a scalar Tensor of type `int32`. Determines the maximum
number of steps to run. If an invariant subspace is found, the algorithm
may terminate before `k` steps have been run.
orthogonalize: If `True`, perform full orthogonalization. If `False` no
orthogonalization is performed.
starting_vector: If not null, must be a `Tensor` of shape `[operator.shape[0]]`.
name: A name scope for the operation.
Returns:
output: A namedtuple representing a Lanczos bidiagonalization of
`operator` with attributes:
u: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[0], k_actual+1]`, where `k_actual` is the number of
steps run.
v: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[1], k_actual]`, where `k_actual` is the number of steps
run.
alpha: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
beta: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
"""
def tarray(size, dtype, name):
return tensor_array_ops.TensorArray(
dtype=dtype, size=size, tensor_array_name=name, clear_after_read=False)
# Reads a row-vector at location i in tarray and returns it as a
# column-vector.
def read_colvec(tarray, i):
return array_ops.expand_dims(tarray.read(i), -1)
# Writes a column-vector as a row-vector at location i in tarray.
def write_colvec(tarray, colvec, i):
return tarray.write(i, array_ops.squeeze(colvec))
# Ephemeral class holding Lanczos bidiagonalization state:
# u = left Lanczos vectors
# v = right Lanczos vectors
# alpha = diagonal of B_k.
# beta = subdiagonal of B_k.
# Notice that we store the left and right Lanczos vectors as the _rows_
# of u and v. This is done because tensors are stored row-major and
# TensorArray only supports packing along dimension 0.
lanzcos_bidiag_state = collections.namedtuple("LanczosBidiagState",
["u", "v", "alpha", "beta"])
def update_state(old, i, u, v, alpha, beta):
return lanzcos_bidiag_state(
write_colvec(old.u, u, i + 1),
write_colvec(old.v, v, i),
old.alpha.write(i, alpha), old.beta.write(i, beta))
def gram_schmidt_step(j, basis, v):
"""Makes v orthogonal to the j'th vector in basis."""
v_shape = v.get_shape()
basis_vec = read_colvec(basis, j)
v -= math_ops.matmul(basis_vec, v, adjoint_a=True) * basis_vec
v.set_shape(v_shape)
return j + 1, basis, v
def orthogonalize_once(i, basis, v):
j = constant_op.constant(0, dtype=dtypes.int32)
_, _, v = control_flow_ops.while_loop(lambda j, basis, v: j < i,
gram_schmidt_step, [j, basis, v])
return util.l2normalize(v)
# Iterated modified Gram-Schmidt orthogonalization adapted from PROPACK.
# TODO(rmlarsen): This is possibly the slowest implementation of
# iterated Gram-Schmidt orthogonalization since the abacus. Move to C++.
def orthogonalize_(i, basis, v):
v_norm = util.l2norm(v)
v_new, v_new_norm = orthogonalize_once(i, basis, v)
# If the norm decreases more than 1/sqrt(2), run a second
# round of MGS. See proof in:
# B. N. Parlett, ``The Symmetric Eigenvalue Problem'',
# Prentice-Hall, Englewood Cliffs, NJ, 1980. pp. 105-109
return control_flow_ops.cond(v_new_norm < 0.7071 * v_norm,
lambda: orthogonalize_once(i, basis, v),
lambda: (v_new, v_new_norm))
def stopping_criterion(i, _):
# TODO(rmlarsen): Stop if an invariant subspace is detected.
return i < k
def lanczos_bidiag_step(i, ls):
"""Extends the Lanczos bidiagonalization ls by one step."""
u = read_colvec(ls.u, i)
r = operator.apply_adjoint(u)
# The shape inference doesn't work across cond, save and reapply the shape.
r_shape = r.get_shape()
r = control_flow_ops.cond(
i > 0, lambda: r - ls.beta.read(i - 1) * read_colvec(ls.v, i - 1),
lambda: r)
r.set_shape(r_shape)
if orthogonalize:
v, alpha = orthogonalize_(i - 1, ls.v, r)
else:
v, alpha = util.l2normalize(r)
p = operator.apply(v) - alpha * u
if orthogonalize:
u, beta = orthogonalize_(i, ls.u, p)
else:
u, beta = util.l2normalize(p)
return i + 1, update_state(ls, i, u, v, alpha, beta)
with ops.name_scope(name):
dtype = operator.dtype
if starting_vector is None:
starting_vector = random_ops.random_uniform(
operator.shape[:1], -1, 1, dtype=dtype)
u0, _ = util.l2normalize(starting_vector)
ls = lanzcos_bidiag_state(
u=write_colvec(tarray(k + 1, dtype, "u"), u0, 0),
v=tarray(k, dtype, "v"),
alpha=tarray(k, dtype, "alpha"),
beta=tarray(k, dtype, "beta"))
i = constant_op.constant(0, dtype=dtypes.int32)
_, ls = control_flow_ops.while_loop(stopping_criterion, lanczos_bidiag_step,
[i, ls])
return lanzcos_bidiag_state(
array_ops.matrix_transpose(ls.u.stack()),
array_ops.matrix_transpose(ls.v.stack()),
ls.alpha.stack(), ls.beta.stack())
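# A minimal usage sketch (editor's addition). The namedtuple below merely
# follows the operator attribute contract documented in the docstring above;
# it is not an API provided by this module. constant_op and math_ops are
# already imported at the top of the file.
#   LinearOperator = collections.namedtuple(
#       "LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
#   a = constant_op.constant([[1., 2.], [3., 4.], [5., 6.]])  # 3 x 2 matrix
#   operator = LinearOperator(
#       shape=[3, 2],
#       dtype=a.dtype,
#       apply=lambda v: math_ops.matmul(a, v),
#       apply_adjoint=lambda v: math_ops.matmul(a, v, adjoint_a=True))
#   lbd = lanczos_bidiag(operator, k=2)
#   # lbd.u, lbd.v, lbd.alpha and lbd.beta hold the factors described above.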
# TODO(rmlarsen): Implement C++ ops for handling bidiagonal matrices
# efficiently. Such a module should provide
# - multiplication,
# - linear system solution by back-substitution,
# - QR factorization,
# - SVD.
def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
"""Multiplies a matrix by a bidiagonal matrix.
alpha and beta are length-k vectors representing the diagonal and first lower
subdiagonal of the (k+1) x k matrix B.
If adjoint_b is False, computes A * B as follows:
A * B = A[:, :-1] * diag(alpha) + A[:, 1:] * diag(beta)
If adjoint_b is True, computes A * B[:-1, :]' as follows
A * B[:-1, :]' =
A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])]
Args:
matrix: A rank-2 `Tensor` representing matrix A.
alpha: A rank-1 `Tensor` representing the diagonal of B.
beta: A rank-1 `Tensor` representing the lower subdiagonal of B.
adjoint_b: `bool` determining what to compute.
name: A name scope for the operation.
Returns:
If `adjoint_b` is False, `A * B` is returned.
If `adjoint_b` is True, `A * B[:-1, :]'` is returned.
"""
with ops.name_scope(name):
alpha = array_ops.expand_dims(alpha, 0)
if adjoint_b is False:
beta = array_ops.expand_dims(beta, 0)
return matrix[:, :-1] * alpha + matrix[:, 1:] * beta
else:
beta = array_ops.expand_dims(beta[:-1], 0)
shape = array_ops.shape(matrix)
zero_column = array_ops.expand_dims(
array_ops.zeros(
shape[:1], dtype=matrix.dtype), 1)
return matrix * alpha + array_ops.concat(
[zero_column, matrix[:, :-1] * beta], 1)
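# A small worked illustration of the bidiagonal structure (editor's addition).
# For k = 2 with alpha = [a0, a1] and beta = [b0, b1], B is the 3 x 2 matrix
#   [[a0,  0],
#    [b0, a1],
#    [ 0, b1]]
# so column j of A * B is A[:, j] * alpha[j] + A[:, j+1] * beta[j], which is
# exactly the A[:, :-1] * alpha + A[:, 1:] * beta expression used above.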
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for making a service.TestService more amenable to use in tests."""
import collections
import threading
# test_control, _service, and test_interfaces are referenced from specification
# in this module.
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import stream
from grpc.framework.foundation import stream_util
from grpc.framework.interfaces.face import face
from grpc_test.framework.common import test_control # pylint: disable=unused-import
from grpc_test.framework.interfaces.face import _service # pylint: disable=unused-import
from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
_IDENTITY = lambda x: x
class TestServiceDigest(
collections.namedtuple(
'TestServiceDigest',
('methods',
'inline_method_implementations',
'event_method_implementations',
'multi_method_implementation',
'unary_unary_messages_sequences',
'unary_stream_messages_sequences',
'stream_unary_messages_sequences',
'stream_stream_messages_sequences',))):
"""A transformation of a service.TestService.
Attributes:
methods: A dict from method group-name pair to test_interfaces.Method object
describing the RPC methods that may be called during the test.
inline_method_implementations: A dict from method group-name pair to
face.MethodImplementation object to be used in tests of in-line calls to
behaviors under test.
event_method_implementations: A dict from method group-name pair to
face.MethodImplementation object to be used in tests of event-driven calls
to behaviors under test.
multi_method_implementation: A face.MultiMethodImplementation to be used in
tests of generic calls to behaviors under test.
unary_unary_messages_sequences: A dict from method group-name pair to
sequence of service.UnaryUnaryTestMessages objects to be used to test the
identified method.
unary_stream_messages_sequences: A dict from method group-name pair to
sequence of service.UnaryStreamTestMessages objects to be used to test the
identified method.
stream_unary_messages_sequences: A dict from method group-name pair to
sequence of service.StreamUnaryTestMessages objects to be used to test the
identified method.
stream_stream_messages_sequences: A dict from method group-name pair to
sequence of service.StreamStreamTestMessages objects to be used to test
the identified method.
"""
class _BufferingConsumer(stream.Consumer):
"""A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
def __init__(self):
self.consumed = []
self.terminated = False
def consume(self, value):
self.consumed.append(value)
def terminate(self):
self.terminated = True
def consume_and_terminate(self, value):
self.consumed.append(value)
self.terminated = True
class _InlineUnaryUnaryMethod(face.MethodImplementation):
def __init__(self, unary_unary_test_method, control):
self._test_method = unary_unary_test_method
self._control = control
self.cardinality = cardinality.Cardinality.UNARY_UNARY
self.style = style.Service.INLINE
def unary_unary_inline(self, request, context):
response_list = []
self._test_method.service(
request, response_list.append, context, self._control)
return response_list.pop(0)
class _EventUnaryUnaryMethod(face.MethodImplementation):
def __init__(self, unary_unary_test_method, control, pool):
self._test_method = unary_unary_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.UNARY_UNARY
self.style = style.Service.EVENT
def unary_unary_event(self, request, response_callback, context):
if self._pool is None:
self._test_method.service(
request, response_callback, context, self._control)
else:
self._pool.submit(
self._test_method.service, request, response_callback, context,
self._control)
class _InlineUnaryStreamMethod(face.MethodImplementation):
def __init__(self, unary_stream_test_method, control):
self._test_method = unary_stream_test_method
self._control = control
self.cardinality = cardinality.Cardinality.UNARY_STREAM
self.style = style.Service.INLINE
def unary_stream_inline(self, request, context):
response_consumer = _BufferingConsumer()
self._test_method.service(
request, response_consumer, context, self._control)
for response in response_consumer.consumed:
yield response
class _EventUnaryStreamMethod(face.MethodImplementation):
def __init__(self, unary_stream_test_method, control, pool):
self._test_method = unary_stream_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.UNARY_STREAM
self.style = style.Service.EVENT
def unary_stream_event(self, request, response_consumer, context):
if self._pool is None:
self._test_method.service(
request, response_consumer, context, self._control)
else:
self._pool.submit(
self._test_method.service, request, response_consumer, context,
self._control)
class _InlineStreamUnaryMethod(face.MethodImplementation):
def __init__(self, stream_unary_test_method, control):
self._test_method = stream_unary_test_method
self._control = control
self.cardinality = cardinality.Cardinality.STREAM_UNARY
self.style = style.Service.INLINE
def stream_unary_inline(self, request_iterator, context):
response_list = []
request_consumer = self._test_method.service(
response_list.append, context, self._control)
for request in request_iterator:
request_consumer.consume(request)
request_consumer.terminate()
return response_list.pop(0)
class _EventStreamUnaryMethod(face.MethodImplementation):
def __init__(self, stream_unary_test_method, control, pool):
self._test_method = stream_unary_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.STREAM_UNARY
self.style = style.Service.EVENT
def stream_unary_event(self, response_callback, context):
request_consumer = self._test_method.service(
response_callback, context, self._control)
if self._pool is None:
return request_consumer
else:
return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
class _InlineStreamStreamMethod(face.MethodImplementation):
def __init__(self, stream_stream_test_method, control):
self._test_method = stream_stream_test_method
self._control = control
self.cardinality = cardinality.Cardinality.STREAM_STREAM
self.style = style.Service.INLINE
def stream_stream_inline(self, request_iterator, context):
response_consumer = _BufferingConsumer()
request_consumer = self._test_method.service(
response_consumer, context, self._control)
for request in request_iterator:
request_consumer.consume(request)
while response_consumer.consumed:
yield response_consumer.consumed.pop(0)
response_consumer.terminate()
class _EventStreamStreamMethod(face.MethodImplementation):
def __init__(self, stream_stream_test_method, control, pool):
self._test_method = stream_stream_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.STREAM_STREAM
self.style = style.Service.EVENT
def stream_stream_event(self, response_consumer, context):
request_consumer = self._test_method.service(
response_consumer, context, self._control)
if self._pool is None:
return request_consumer
else:
return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
class _UnaryConsumer(stream.Consumer):
"""A Consumer that only allows consumption of exactly one value."""
def __init__(self, action):
self._lock = threading.Lock()
self._action = action
self._consumed = False
self._terminated = False
def consume(self, value):
with self._lock:
if self._consumed:
raise ValueError('Unary consumer already consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._consumed = True
self._action(value)
def terminate(self):
with self._lock:
if not self._consumed:
raise ValueError('Unary consumer hasn\'t yet consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._terminated = True
def consume_and_terminate(self, value):
with self._lock:
if self._consumed:
raise ValueError('Unary consumer already consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._consumed = True
self._terminated = True
self._action(value)
class _UnaryUnaryAdaptation(object):
def __init__(self, unary_unary_test_method):
self._method = unary_unary_test_method
def service(self, response_consumer, context, control):
def action(request):
self._method.service(
request, response_consumer.consume_and_terminate, context, control)
return _UnaryConsumer(action)
class _UnaryStreamAdaptation(object):
def __init__(self, unary_stream_test_method):
self._method = unary_stream_test_method
def service(self, response_consumer, context, control):
def action(request):
self._method.service(request, response_consumer, context, control)
return _UnaryConsumer(action)
class _StreamUnaryAdaptation(object):
def __init__(self, stream_unary_test_method):
self._method = stream_unary_test_method
def service(self, response_consumer, context, control):
return self._method.service(
response_consumer.consume_and_terminate, context, control)
class _MultiMethodImplementation(face.MultiMethodImplementation):
def __init__(self, methods, control, pool):
self._methods = methods
self._control = control
self._pool = pool
def service(self, group, name, response_consumer, context):
method = self._methods.get(group, name, None)
if method is None:
raise face.NoSuchMethodError(group, name)
elif self._pool is None:
return method(response_consumer, context, self._control)
else:
request_consumer = method(response_consumer, context, self._control)
return stream_util.ThreadSwitchingConsumer(request_consumer, self._pool)
class _Assembly(
collections.namedtuple(
'_Assembly',
['methods', 'inlines', 'events', 'adaptations', 'messages'])):
"""An intermediate structure created when creating a TestServiceDigest."""
def _assemble(
scenarios, identifiers, inline_method_constructor, event_method_constructor,
adapter, control, pool):
"""Creates an _Assembly from the given scenarios."""
methods = {}
inlines = {}
events = {}
adaptations = {}
messages = {}
for identifier, scenario in scenarios.iteritems():
if identifier in identifiers:
raise ValueError('Repeated identifier "(%s, %s)"!' % identifier)
test_method = scenario[0]
inline_method = inline_method_constructor(test_method, control)
event_method = event_method_constructor(test_method, control, pool)
adaptation = adapter(test_method)
methods[identifier] = test_method
inlines[identifier] = inline_method
events[identifier] = event_method
adaptations[identifier] = adaptation
messages[identifier] = scenario[1]
return _Assembly(methods, inlines, events, adaptations, messages)
def digest(service, control, pool):
"""Creates a TestServiceDigest from a TestService.
Args:
service: A _service.TestService.
control: A test_control.Control.
pool: If RPC methods should be serviced in a separate thread, a thread pool.
None if RPC methods should be serviced in the thread belonging to the
run-time that calls for their service.
Returns:
A TestServiceDigest synthesized from the given service.TestService.
"""
identifiers = set()
unary_unary = _assemble(
service.unary_unary_scenarios(), identifiers, _InlineUnaryUnaryMethod,
_EventUnaryUnaryMethod, _UnaryUnaryAdaptation, control, pool)
identifiers.update(unary_unary.inlines)
unary_stream = _assemble(
service.unary_stream_scenarios(), identifiers, _InlineUnaryStreamMethod,
_EventUnaryStreamMethod, _UnaryStreamAdaptation, control, pool)
identifiers.update(unary_stream.inlines)
stream_unary = _assemble(
service.stream_unary_scenarios(), identifiers, _InlineStreamUnaryMethod,
_EventStreamUnaryMethod, _StreamUnaryAdaptation, control, pool)
identifiers.update(stream_unary.inlines)
stream_stream = _assemble(
service.stream_stream_scenarios(), identifiers, _InlineStreamStreamMethod,
_EventStreamStreamMethod, _IDENTITY, control, pool)
identifiers.update(stream_stream.inlines)
methods = dict(unary_unary.methods)
methods.update(unary_stream.methods)
methods.update(stream_unary.methods)
methods.update(stream_stream.methods)
adaptations = dict(unary_unary.adaptations)
adaptations.update(unary_stream.adaptations)
adaptations.update(stream_unary.adaptations)
adaptations.update(stream_stream.adaptations)
inlines = dict(unary_unary.inlines)
inlines.update(unary_stream.inlines)
inlines.update(stream_unary.inlines)
inlines.update(stream_stream.inlines)
events = dict(unary_unary.events)
events.update(unary_stream.events)
events.update(stream_unary.events)
events.update(stream_stream.events)
return TestServiceDigest(
methods,
inlines,
events,
_MultiMethodImplementation(adaptations, control, pool),
unary_unary.messages,
unary_stream.messages,
stream_unary.messages,
stream_stream.messages)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-facing classes for Metrics API.
The classes in this file allow users to define and use metrics to be collected
and displayed as part of their pipeline execution.
- Metrics - This class lets pipeline and transform writers create and access
metric objects such as counters, distributions, etc.
"""
import inspect
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import Counter, Distribution
from apache_beam.metrics.metricbase import MetricName
__all__ = ['Metrics', 'MetricsFilter']
class Metrics(object):
"""Lets users create/access metric objects during pipeline execution."""
@staticmethod
def get_namespace(namespace):
if inspect.isclass(namespace):
return '{}.{}'.format(namespace.__module__, namespace.__name__)
elif isinstance(namespace, str):
return namespace
else:
raise ValueError('Unknown namespace type')
@staticmethod
def counter(namespace, name):
"""Obtains or creates a Counter metric.
Args:
namespace: A class or string that gives the namespace to a metric
name: A string that gives a unique name to a metric
Returns:
A Counter object.
"""
namespace = Metrics.get_namespace(namespace)
return Metrics.DelegatingCounter(MetricName(namespace, name))
@staticmethod
def distribution(namespace, name):
"""Obtains or creates a Distribution metric.
Distribution metrics are restricted to integer-only distributions.
Args:
namespace: A class or string that gives the namespace to a metric
name: A string that gives a unique name to a metric
Returns:
A Distribution object.
"""
namespace = Metrics.get_namespace(namespace)
return Metrics.DelegatingDistribution(MetricName(namespace, name))
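# A hedged usage sketch (editor's addition; the DoFn and metric names are
# hypothetical, and `import apache_beam as beam` is assumed):
#   class CountWords(beam.DoFn):
#     def __init__(self):
#       self.word_counter = Metrics.counter(self.__class__, 'words')
#       self.word_lengths = Metrics.distribution(self.__class__, 'word_len')
#     def process(self, element):
#       self.word_counter.inc()
#       self.word_lengths.update(len(element))
#       yield element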
class DelegatingCounter(Counter):
def __init__(self, metric_name):
self.metric_name = metric_name
def inc(self, n=1):
container = MetricsEnvironment.current_container()
if container is not None:
container.get_counter(self.metric_name).inc(n)
class DelegatingDistribution(Distribution):
def __init__(self, metric_name):
self.metric_name = metric_name
def update(self, value):
container = MetricsEnvironment.current_container()
if container is not None:
container.get_distribution(self.metric_name).update(value)
class MetricResults(object):
@staticmethod
def _matches_name(filter, metric_key):
if not filter.names and not filter.namespaces:
return True
if ((filter.namespaces and
metric_key.metric.namespace in filter.namespaces) or
(filter.names and
metric_key.metric.name in filter.names)):
return True
return False
@staticmethod
def _matches_sub_path(actual_scope, filter_scope):
start_pos = actual_scope.find(filter_scope)
end_pos = start_pos + len(filter_scope)
if start_pos == -1:
return False # No match at all
elif start_pos != 0 and actual_scope[start_pos - 1] != '/':
return False # The first entry was not exactly matched
elif end_pos != len(actual_scope) and actual_scope[end_pos] != '/':
return False # The last entry was not exactly matched
return True
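# A short illustration of the sub-path matching above (editor's addition;
# the step names are made up):
#   _matches_sub_path('Top/Outer/Inner', 'Outer/Inner')  # True, '/'-aligned
#   _matches_sub_path('Top/Outer/Inner', 'uter/Inner')   # False, partial segment
#   _matches_sub_path('Top/Outer/Inner', 'Outer/Inn')    # False, partial segment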
@staticmethod
def _matches_scope(filter, metric_key):
if not filter.steps:
return True
for step in filter.steps:
if MetricResults._matches_sub_path(metric_key.step, step):
return True
return False
@staticmethod
def matches(filter, metric_key):
if filter is None:
return True
if (MetricResults._matches_name(filter, metric_key) and
MetricResults._matches_scope(filter, metric_key)):
return True
return False
def query(self, filter=None):
raise NotImplementedError
class MetricsFilter(object):
"""Simple object to filter metrics results.
This class is experimental. No backwards-compatibility guarantees.
It filters by matching a result's step, namespace, and name against three internal
sets. No execution/matching logic is added to this object, so that it may
be used to construct arguments as an RPC request. It is left for runners
to implement matching logic by themselves.
"""
def __init__(self):
self._names = set()
self._namespaces = set()
self._steps = set()
@property
def steps(self):
return frozenset(self._steps)
@property
def names(self):
return frozenset(self._names)
@property
def namespaces(self):
return frozenset(self._namespaces)
def with_name(self, name):
return self.with_names([name])
def with_names(self, names):
if isinstance(names, str):
raise ValueError('Names must be a collection, not a string')
self._names.update(names)
return self
def with_namespace(self, namespace):
return self.with_namespaces([namespace])
def with_namespaces(self, namespaces):
if isinstance(namespaces, str):
raise ValueError('Namespaces must be an iterable, not a string')
self._namespaces.update([Metrics.get_namespace(ns) for ns in namespaces])
return self
def with_step(self, step):
return self.with_steps([step])
def with_steps(self, steps):
if isinstance(steps, str):
raise ValueError('Steps must be an iterable, not a string')
self._steps.update(steps)
return self
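# A hedged usage sketch (editor's addition; the step and metric names are
# hypothetical):
#   flt = (MetricsFilter()
#          .with_namespace('my.module')
#          .with_name('words')
#          .with_step('CountWords'))
#   # A runner's MetricResults implementation could then be queried with it,
#   # e.g. pipeline_result.metrics().query(flt).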
| |
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for Ceph backup service."""
import hashlib
import os
import tempfile
import threading
import uuid
import ddt
import mock
from os_brick.initiator import linuxrbd
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
import six
from six.moves import range
from cinder.backup import driver
from cinder.backup.drivers import ceph
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
CONF = cfg.CONF
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockObjectNotFoundException(MockException):
"""Used as mock for rados.MockObjectNotFoundException."""
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
# NOTE(dosaboy): mock Popen to, by default, raise Exception in order to
# ensure that any test ending up in a subprocess fails
# if not properly mocked.
@mock.patch('subprocess.Popen', spec=True)
# NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing.
@mock.patch('eventlet.sleep', spec=True)
@mock.patch('time.time', spec=True)
# NOTE(dosaboy): set spec to empty object so that hasattr calls return
# False by default.
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep,
mock_popen):
mock_time.side_effect = inst.time_inc
mock_popen.side_effect = Exception
inst.mock_rados = mock_rados
inst.mock_rbd = mock_rbd
inst.mock_rbd.ImageBusy = MockImageBusyException
inst.mock_rbd.ImageNotFound = MockImageNotFoundException
inst.mock_rados.ObjectNotFound = MockObjectNotFoundException
inst.service.rbd = inst.mock_rbd
inst.service.rados = inst.mock_rados
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
@ddt.ddt
class BackupCephTestCase(test.TestCase):
"""Test case for ceph backup driver."""
def _create_volume_db_entry(self, id, size):
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size,
userid=str(uuid.uuid4()),
projectid=str(uuid.uuid4())):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id']
def time_inc(self):
self.counter += 1
return self.counter
def _get_wrapped_rbd_io(self, rbd_image):
rbd_meta = linuxrbd.RBDImageMetadata(rbd_image, 'pool_foo',
'user_foo', 'conf_foo')
return linuxrbd.RBDVolumeIOWrapper(rbd_meta)
def _setup_mock_popen(self, mock_popen, retval=None, p1hook=None,
p2hook=None):
class MockPopen(object):
hooks = [p2hook, p1hook]
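# NOTE: hooks are popped from the end of this list, so p1hook runs on the
# first Popen invocation and p2hook on the second.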
def __init__(mock_inst, cmd, *args, **kwargs):
self.callstack.append('popen_init')
mock_inst.stdout = mock.Mock()
mock_inst.stdout.close = mock.Mock()
mock_inst.stdout.close.side_effect = \
lambda *args: self.callstack.append('stdout_close')
mock_inst.returncode = 0
hook = mock_inst.__class__.hooks.pop()
if hook is not None:
hook()
def communicate(mock_inst):
self.callstack.append('communicate')
return retval
mock_popen.side_effect = MockPopen
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(BackupCephTestCase, self).setUp()
self.ctxt = context.get_admin_context()
# Create volume.
self.volume_size = 1
self.volume_id = str(uuid.uuid4())
self._create_volume_db_entry(self.volume_id, self.volume_size)
self.volume = db.volume_get(self.ctxt, self.volume_id)
# Create backup of volume.
self.backup_id = str(uuid.uuid4())
self._create_backup_db_entry(self.backup_id, self.volume_id,
self.volume_size)
self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
self.backup.container = "backups"
# Create alternate volume.
self.alt_volume_id = str(uuid.uuid4())
self._create_volume_db_entry(self.alt_volume_id, self.volume_size)
self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id)
self.chunk_size = 1024
self.num_chunks = 128
self.data_length = self.num_chunks * self.chunk_size
self.checksum = hashlib.sha256()
# Create a file with some data in it.
self.volume_file = tempfile.NamedTemporaryFile()
self.addCleanup(self.volume_file.close)
for _i in range(0, self.num_chunks):
data = os.urandom(self.chunk_size)
self.checksum.update(data)
self.volume_file.write(data)
self.volume_file.seek(0)
# Always trigger an exception if a command is executed since it should
# always be dealt with gracefully. At the time of writing only rbd
# export/import-diff is executed and, if they fail, we expect to find
# alternative means of backing up.
mock_exec = mock.Mock()
mock_exec.side_effect = processutils.ProcessExecutionError
self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec)
# Ensure that time.time() always returns more than the last time it was
# called to avoid div by zero errors.
self.counter = float(0)
self.callstack = []
@common_mocks
def test_get_rbd_support(self):
del self.service.rbd.RBD_FEATURE_LAYERING
del self.service.rbd.RBD_FEATURE_STRIPINGV2
del self.service.rbd.RBD_FEATURE_EXCLUSIVE_LOCK
del self.service.rbd.RBD_FEATURE_JOURNALING
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING'))
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2'))
self.assertFalse(hasattr(self.service.rbd,
'RBD_FEATURE_EXCLUSIVE_LOCK'))
self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_JOURNALING'))
oldformat, features = self.service._get_rbd_support()
self.assertTrue(oldformat)
self.assertEqual(0, features)
self.service.rbd.RBD_FEATURE_LAYERING = 1
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1, features)
self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1 | 2, features)
# initially, backup_ceph_image_journals = False. test that
# the flags are defined, but that they are not returned.
self.service.rbd.RBD_FEATURE_EXCLUSIVE_LOCK = 4
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1 | 2, features)
self.service.rbd.RBD_FEATURE_JOURNALING = 64
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1 | 2, features)
# test that the config setting properly sets the FEATURE bits.
# because journaling requires exclusive-lock, these are set
# at the same time.
CONF.set_override("backup_ceph_image_journals", True)
oldformat, features = self.service._get_rbd_support()
self.assertFalse(oldformat)
self.assertEqual(1 | 2 | 4 | 64, features)
@common_mocks
def test_get_most_recent_snap(self):
last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4())
image = self.mock_rbd.Image.return_value
with mock.patch.object(self.service, '_snap_exists') as \
mock_snap_exists:
mock_snap_exists.return_value = True
image.list_snaps.return_value = \
[{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': last},
{'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
base_name = "mock_base"
client = mock.Mock()
snap = self.service._get_most_recent_snap(image, base_name, client)
self.assertEqual(last, snap)
@common_mocks
def test_get_backup_snap_name(self):
snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
def get_backup_snaps(inst, *args):
return [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4()),
'backup_id': str(uuid.uuid4())},
{'name': snap_name,
'backup_id': self.backup_id}]
with mock.patch.object(self.service, 'get_backup_snaps'):
name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo',
self.backup_id)
self.assertIsNone(name)
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.side_effect = get_backup_snaps
name = self.service._get_backup_snap_name(self.service.rbd.Image(),
'base_foo',
self.backup_id)
self.assertEqual(snap_name, name)
self.assertTrue(mock_get_backup_snaps.called)
@common_mocks
def test_get_backup_snaps(self):
image = self.mock_rbd.Image.return_value
image.list_snaps.return_value = [
{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.wambam.6423868.2342' % (uuid.uuid4())},
{'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': 'bbbackup.%s.snap.1321319.3235' % (uuid.uuid4())},
{'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
snaps = self.service.get_backup_snaps(image)
self.assertEqual(3, len(snaps))
@common_mocks
def test_transfer_data_from_rbd_to_file(self):
def fake_read(offset, length):
self.volume_file.seek(offset)
return self.volume_file.read(length)
self.mock_rbd.Image.return_value.read.side_effect = fake_read
self.mock_rbd.Image.return_value.size.return_value = self.data_length
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
self.service._transfer_data(rbd_io, 'src_foo', test_file,
'dest_foo', self.data_length)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_rbd_to_rbd(self):
def fake_read(offset, length):
self.volume_file.seek(offset)
return self.volume_file.read(length)
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
rbd1 = mock.Mock()
rbd1.read.side_effect = fake_read
rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size
rbd2 = mock.Mock()
rbd2.write.side_effect = mock_write_data
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
src_rbd_io = self._get_wrapped_rbd_io(rbd1)
dest_rbd_io = self._get_wrapped_rbd_io(rbd2)
self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io,
'dest_foo', self.data_length)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_file_to_rbd(self):
def mock_write_data(data, offset):
checksum.update(data)
test_file.write(data)
self.mock_rbd.Image.return_value.write.side_effect = mock_write_data
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
self.service._transfer_data(self.volume_file, 'src_foo',
rbd_io, 'dest_foo', self.data_length)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_transfer_data_from_file_to_file(self):
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
checksum = hashlib.sha256()
self.service._transfer_data(self.volume_file, 'src_foo', test_file,
'dest_foo', self.data_length)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
@common_mocks
def test_backup_volume_from_file(self):
checksum = hashlib.sha256()
thread_dict = {}
def mock_write_data(data, offset):
checksum.update(data)
thread_dict['thread'] = threading.current_thread()
test_file.write(data)
self.service.rbd.Image.return_value.write.side_effect = mock_write_data
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, '_discard_bytes'):
with tempfile.NamedTemporaryFile() as test_file:
self.service.backup(self.backup, self.volume_file)
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(self.service.rbd.Image.return_value.write.called)
self.assertNotEqual(threading.current_thread(), thread_dict['thread'])
@common_mocks
def test_get_backup_base_name(self):
name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.assertEqual("volume-%s.backup.base" % (self.volume_id), name)
self.assertRaises(exception.InvalidParameterValue,
self.service._get_backup_base_name,
self.volume_id)
name = self.service._get_backup_base_name(self.volume_id, '1234')
self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'),
name)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
with mock.patch.object(self.service, '_full_backup') as \
mock_full_backup:
with mock.patch.object(self.service,
'_try_delete_base_image'):
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
mock_get_backup_snaps.return_value = (
[{'name': 'backup.mock.snap.153464362.12'},
{'name': 'backup.mock.snap.15341241.90'},
{'name': 'backup.mock.snap.199994362.10'}])
output = self.service.backup(self.backup, rbdio)
self.assertDictEqual({}, output)
self.assertEqual(['popen_init',
'read',
'popen_init',
'write',
'stdout_close',
'communicate'], self.callstack)
self.assertFalse(mock_full_backup.called)
self.assertTrue(mock_get_backup_snaps.called)
# Ensure the files are equal
self.assertEqual(checksum.digest(),
self.checksum.digest())
@common_mocks
def test_backup_volume_from_rbd_set_parent_id(self):
with mock.patch.object(self.service, '_backup_rbd') as \
mock_backup_rbd, mock.patch.object(self.service,
'_backup_metadata'):
mock_backup_rbd.return_value = {'parent_id': 'mock'}
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
output = self.service.backup(self.backup, rbdio)
self.assertDictEqual({'parent_id': 'mock'}, output)
@common_mocks
def test_backup_volume_from_rbd_set_parent_id_none(self):
backup_name = self.service._get_backup_base_name(
self.backup_id, diff_format=True)
self.mock_rbd.RBD().list.return_value = [backup_name]
self.backup.parent_id = 'mock_parent_id'
with mock.patch.object(self.service, 'get_backup_snaps'), \
mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
def mock_rbd_diff_transfer_side_effect(src_name, src_pool,
dest_name, dest_pool,
src_user, src_conf,
dest_user, dest_conf,
src_snap, from_snap):
raise exception.BackupRBDOperationFailed(_('mock'))
# Raise a pseudo exception.BackupRBDOperationFailed.
mock_rbd_diff_transfer.side_effect \
= mock_rbd_diff_transfer_side_effect
with mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service,
'_try_delete_base_image'):
with mock.patch.object(self.service, '_backup_metadata'):
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
mock_get_backup_snaps.return_value = (
[{'name': 'backup.mock.snap.153464362.12'},
{'name': 'backup.mock.snap.199994362.10'}])
output = self.service.backup(self.backup, rbdio)
self.assertIsNone(output['parent_id'])
@common_mocks
def test_backup_rbd_set_parent_id(self):
backup_name = self.service._get_backup_base_name(
self.backup_id, diff_format=True)
vol_name = self.volume.name
vol_length = self.volume.size
self.mock_rbd.RBD().list.return_value = [backup_name]
with mock.patch.object(self.service, '_snap_exists'), \
mock.patch.object(self.service, '_get_backup_base_name') as \
mock_get_backup_base_name, mock.patch.object(
self.service, '_get_most_recent_snap') as mock_get_most_recent_snap, \
mock.patch.object(self.service, '_rbd_diff_transfer'):
mock_get_backup_base_name.return_value = backup_name
mock_get_most_recent_snap.return_value = (
'backup.mock.snap.153464362.12')
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
rbdio.seek(0)
output = self.service._backup_rbd(self.backup, rbdio,
vol_name, vol_length)
self.assertDictEqual({'parent_id': 'mock'}, output)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In _backup_rbd(), after an exception.BackupRBDOperationFailed
occurs in self._rbd_diff_transfer(), we want to check the
process when the second exception occurs in
self._try_delete_base_image().
"""
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = (
[{'name': 'backup.mock.snap.153464362.12'},
{'name': 'backup.mock.snap.199994362.10'}])
with mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
def mock_rbd_diff_transfer_side_effect(src_name, src_pool,
dest_name, dest_pool,
src_user, src_conf,
dest_user, dest_conf,
src_snap, from_snap):
raise exception.BackupRBDOperationFailed(_('mock'))
# Raise a pseudo exception.BackupRBDOperationFailed.
mock_rbd_diff_transfer.side_effect \
= mock_rbd_diff_transfer_side_effect
with mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service, '_try_delete_base_image') as \
mock_try_delete_base_image:
def mock_try_delete_base_image_side_effect(backup_id,
base_name):
raise self.service.rbd.ImageNotFound(_('mock'))
# Raise a pseudo exception rbd.ImageNotFound.
mock_try_delete_base_image.side_effect \
= mock_try_delete_base_image_side_effect
with mock.patch.object(self.service, '_backup_metadata'):
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
# We expect that the second exception is
# notified.
self.assertRaises(
self.service.rbd.ImageNotFound,
self.service.backup,
self.backup, rbdio)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception.BackupOperationError occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete_backup().
"""
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
def mock_write_data():
self.volume_file.seek(0)
data = self.volume_file.read(self.data_length)
self.callstack.append('write')
checksum.update(data)
test_file.write(data)
def mock_read_data():
self.callstack.append('read')
return self.volume_file.read(self.data_length)
self._setup_mock_popen(mock_popen,
['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
self.mock_rbd.RBD.list = mock.Mock()
self.mock_rbd.RBD.list.return_value = [backup_name]
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = (
[{'name': 'backup.mock.snap.153464362.12'},
{'name': 'backup.mock.snap.199994362.10'}])
with mock.patch.object(self.service, '_rbd_diff_transfer'), \
mock.patch.object(self.service, '_full_backup'), \
mock.patch.object(self.service, '_backup_metadata') as \
mock_backup_metadata:
def mock_backup_metadata_side_effect(backup):
raise exception.BackupOperationError(_('mock'))
# Raise a pseudo exception.BackupOperationError.
mock_backup_metadata.side_effect = (
mock_backup_metadata_side_effect)
with mock.patch.object(self.service, 'delete_backup') as \
mock_delete:
def mock_delete_side_effect(backup):
raise self.service.rbd.ImageBusy()
# Raise a pseudo exception rbd.ImageBusy.
mock_delete.side_effect = mock_delete_side_effect
with tempfile.NamedTemporaryFile() as test_file:
checksum = hashlib.sha256()
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
# We expect that the second exception is
# notified.
self.assertRaises(
self.service.rbd.ImageBusy,
self.service.backup,
self.backup, rbdio)
@common_mocks
def test_backup_rbd_from_snap(self):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
vol_name = self.volume['name']
vol_length = self.service._get_volume_size_gb(self.volume)
self.mock_rbd.RBD().list = mock.Mock()
self.mock_rbd.RBD().list.return_value = ['mock']
with mock.patch.object(self.service, '_get_new_snap_name') as \
mock_get_new_snap_name:
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
with mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
with mock.patch.object(self.service, '_get_backup_base_name') as \
mock_get_backup_base_name:
mock_get_backup_base_name.return_value = (
backup_name)
mock_get_backup_snaps.return_value = (
[{'name': 'backup.mock.snap.153464362.12'},
{'name': 'backup.mock.snap.15341241.90'},
{'name': 'backup.mock.snap.199994362.10'}])
mock_get_new_snap_name.return_value = 'new_snap'
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
rbdio.seek(0)
self.service._backup_rbd(self.backup, rbdio,
vol_name, vol_length)
mock_rbd_diff_transfer.assert_called_with(
vol_name, 'pool_foo', backup_name,
self.backup.container, src_user='user_foo',
src_conf='conf_foo',
dest_conf='/etc/ceph/ceph.conf',
dest_user='cinder', src_snap='new_snap',
from_snap=None)
@common_mocks
def test_backup_rbd_from_snap2(self):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
vol_name = self.volume['name']
vol_length = self.service._get_volume_size_gb(self.volume)
self.mock_rbd.RBD().list = mock.Mock()
self.mock_rbd.RBD().list.return_value = [backup_name]
with mock.patch.object(self.service, '_get_most_recent_snap') as \
mock_get_most_recent_snap:
with mock.patch.object(self.service, '_get_backup_base_name') as \
mock_get_backup_base_name:
with mock.patch.object(self.service, '_rbd_diff_transfer') as \
mock_rbd_diff_transfer:
with mock.patch.object(self.service, '_get_new_snap_name') as \
mock_get_new_snap_name:
mock_get_backup_base_name.return_value = (
backup_name)
mock_get_most_recent_snap.return_value = (
'backup.mock.snap.153464362.12')
mock_get_new_snap_name.return_value = 'new_snap'
image = self.service.rbd.Image()
meta = linuxrbd.RBDImageMetadata(image,
'pool_foo',
'user_foo',
'conf_foo')
rbdio = linuxrbd.RBDVolumeIOWrapper(meta)
rbdio.seek(0)
self.service._backup_rbd(self.backup, rbdio,
vol_name, vol_length)
mock_rbd_diff_transfer.assert_called_with(
vol_name, 'pool_foo', backup_name,
self.backup.container, src_user='user_foo',
src_conf='conf_foo',
dest_conf='/etc/ceph/ceph.conf',
dest_user='cinder', src_snap='new_snap',
from_snap='backup.mock.snap.153464362.12')
@common_mocks
def test_backup_vol_length_0(self):
volume_id = fake.VOLUME_ID
self._create_volume_db_entry(volume_id, 0)
backup_id = fake.BACKUP_ID
self._create_backup_db_entry(backup_id, volume_id, 1)
backup = objects.Backup.get_by_id(self.ctxt, backup_id)
self.assertRaises(exception.InvalidParameterValue, self.service.backup,
backup, self.volume_file)
@common_mocks
def test_backup_with_container_name(self):
volume_size = self.volume_size * units.Gi
backup_id = fake.BACKUP_ID
self._create_backup_db_entry(backup_id, self.volume_id, 1)
backup = objects.Backup.get_by_id(self.ctxt, backup_id)
backup.container = "test"
with mock.patch.object(
self.service, '_full_backup',
side_effect=exception.BackupOperationError()) as mock_full:
self.assertRaises(exception.BackupOperationError,
self.service.backup, backup, self.volume_file)
mock_full.assert_called_once_with(backup, self.volume_file,
self.volume.name, volume_size)
@common_mocks
def test_restore(self):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
thread_dict = {}
def mock_read_data(offset, length):
thread_dict['thread'] = threading.current_thread()
return self.volume_file.read(self.data_length)
self.mock_rbd.Image.return_value.read.side_effect = mock_read_data
self.mock_rbd.Image.return_value.size.return_value = \
self.chunk_size * self.num_chunks
with mock.patch.object(self.service, '_restore_metadata') as \
mock_restore_metadata:
with mock.patch.object(self.service, '_discard_bytes') as \
mock_discard_bytes:
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
self.service.restore(self.backup, self.volume_id,
test_file)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(mock_restore_metadata.called)
self.assertTrue(mock_discard_bytes.called)
self.assertTrue(mock_discard_bytes.called)
self.assertTrue(self.service.rbd.Image.return_value.read.called)
self.assertNotEqual(threading.current_thread(), thread_dict['thread'])
@common_mocks
def test_discard_bytes(self):
# Lower the chunksize to a memory manageable number
thread_dict = {}
self.service.chunk_size = 1024
image = self.mock_rbd.Image.return_value
wrapped_rbd = self._get_wrapped_rbd_io(image)
def mock_discard(offset, length):
thread_dict['thread'] = threading.current_thread()
return self.mock_rbd.Image.discard(offset, length)
self.mock_rbd.Image.return_value.discard.side_effect = mock_discard
self.service._discard_bytes(wrapped_rbd, 0, 0)
self.assertEqual(0, image.discard.call_count)
self.service._discard_bytes(wrapped_rbd, 0, 1234)
self.assertEqual(1, image.discard.call_count)
image.reset_mock()
# Test discard with no remainder
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
self.service._discard_bytes(wrapped_rbd, 0,
self.service.chunk_size * 2)
self.assertEqual(2, image.write.call_count)
self.assertEqual(2, image.flush.call_count)
self.assertFalse(image.discard.called)
zeroes = '\0' * self.service.chunk_size
image.write.assert_has_calls([mock.call(zeroes, 0),
mock.call(zeroes, self.chunk_size)])
self.assertNotEqual(threading.current_thread(),
thread_dict['thread'])
image.reset_mock()
image.write.reset_mock()
# Now test with a remainder.
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
self.service._discard_bytes(wrapped_rbd, 0,
(self.service.chunk_size * 2) + 1)
self.assertEqual(3, image.write.call_count)
self.assertEqual(3, image.flush.call_count)
self.assertFalse(image.discard.called)
image.write.assert_has_calls([mock.call(zeroes,
self.chunk_size * 2),
mock.call(zeroes,
self.chunk_size * 3),
mock.call('\0',
self.chunk_size * 4)])
@common_mocks
def test_delete_backup_snapshot(self):
snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
base_name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.mock_rbd.RBD.remove_snap = mock.Mock()
thread_dict = {}
def mock_side_effect(snap):
thread_dict['thread'] = threading.current_thread()
self.mock_rbd.Image.return_value.remove_snap.side_effect = \
mock_side_effect
with mock.patch.object(self.service, '_get_backup_snap_name') as \
mock_get_backup_snap_name:
mock_get_backup_snap_name.return_value = snap_name
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = None
rem = self.service._delete_backup_snapshot(self.mock_rados,
base_name,
self.backup_id)
self.assertTrue(mock_get_backup_snap_name.called)
self.assertTrue(mock_get_backup_snaps.called)
self.assertEqual((snap_name, 0), rem)
self.assertNotEqual(threading.current_thread(),
thread_dict['thread'])
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_try_delete_base_image_diff_format(self, mock_meta_backup):
backup_name = self.service._get_backup_base_name(self.volume_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
with mock.patch.object(self.service, '_delete_backup_snapshot') as \
mock_del_backup_snap:
snap_name = self.service._get_new_snap_name(self.backup_id)
mock_del_backup_snap.return_value = (snap_name, 0)
self.service.delete_backup(self.backup)
self.assertTrue(mock_del_backup_snap.called)
self.assertTrue(self.mock_rbd.RBD.return_value.list.called)
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_try_delete_base_image(self, mock_meta_backup):
backup_name = self.service._get_backup_base_name(self.volume_id,
self.backup_id)
thread_dict = {}
def mock_side_effect(ioctx, base_name):
thread_dict['thread'] = threading.current_thread()
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
self.mock_rbd.RBD.return_value.remove.side_effect = mock_side_effect
with mock.patch.object(self.service, 'get_backup_snaps'):
self.service.delete_backup(self.backup)
self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
self.assertNotEqual(threading.current_thread(),
thread_dict['thread'])
@common_mocks
def test_try_delete_base_image_busy(self):
"""This should induce retries then raise rbd.ImageBusy."""
backup_name = self.service._get_backup_base_name(self.volume_id,
self.backup_id)
rbd = self.mock_rbd.RBD.return_value
rbd.list.return_value = [backup_name]
rbd.remove.side_effect = self.mock_rbd.ImageBusy
with mock.patch.object(self.service, 'get_backup_snaps') as \
mock_get_backup_snaps:
self.assertRaises(self.mock_rbd.ImageBusy,
self.service._try_delete_base_image,
self.backup)
self.assertTrue(mock_get_backup_snaps.called)
self.assertTrue(rbd.list.called)
self.assertTrue(rbd.remove.called)
self.assertIn(MockImageBusyException, RAISED_EXCEPTIONS)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_delete_image_not_found(self, mock_meta_backup):
with mock.patch.object(self.service, '_try_delete_base_image') as \
mock_del_base:
mock_del_base.side_effect = self.mock_rbd.ImageNotFound
# ImageNotFound exception is caught so that db entry can be cleared
self.service.delete_backup(self.backup)
self.assertEqual([MockImageNotFoundException], RAISED_EXCEPTIONS)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_delete_pool_not_found(self, mock_meta_backup):
with mock.patch.object(
self.service, '_try_delete_base_image') as mock_del_base:
mock_del_base.side_effect = self.mock_rados.ObjectNotFound
# ObjectNotFound exception is caught so that db entry can be
# cleared
self.service.delete_backup(self.backup)
self.assertEqual([MockObjectNotFoundException],
RAISED_EXCEPTIONS)
mock_del_base.assert_called_once_with(self.backup)
mock_meta_backup.return_value.remove_if_exists.assert_not_called()
@common_mocks
def test_diff_restore_allowed_with_image_not_exists(self):
"""Test diff restore not allowed when backup not diff-format."""
not_allowed = (False, None)
backup_base = 'backup.base'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (False, backup_base)
resp = self.service._diff_restore_allowed(*args_vols_different)
self.assertEqual(not_allowed, resp)
mock_rbd_image_exists.assert_called_once_with(
backup_base,
self.backup['volume_id'],
self.mock_rados)
@common_mocks
def test_diff_restore_allowed_with_no_restore_point(self):
"""Test diff restore not allowed when no restore point found.
Detail conditions:
1. backup base is diff-format
2. restore point does not exist
"""
not_allowed = (False, None)
backup_base = 'backup.base'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = None
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual(not_allowed, resp)
self.assertTrue(mock_rbd_image_exists.called)
mock_get_restore_point.assert_called_once_with(
backup_base,
self.backup['id'])
@common_mocks
def test_diff_restore_allowed_with_not_rbd(self):
"""Test diff restore not allowed when destination volume is not rbd.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is not an rbd.
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = False
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
mock_file_is_rbd.assert_called_once_with(
rbd_io)
@common_mocks
def test_diff_restore_allowed_with_same_volume(self):
"""Test diff restore not allowed when volumes are same.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are the same
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_same = [backup_base, self.backup, self.volume, rbd_io,
self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
resp = self.service._diff_restore_allowed(*args_vols_same)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
@common_mocks
def test_diff_restore_allowed_with_has_extents(self):
"""Test diff restore not allowed when destination volume has data.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are different
5. destination volume has data on it - full copy is mandated
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
with mock.patch.object(self.service, '_rbd_has_extents') \
as mock_rbd_has_extents:
mock_rbd_has_extents.return_value = True
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((False, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
mock_rbd_has_extents.assert_called_once_with(
rbd_io.rbd_image)
@common_mocks
def test_diff_restore_allowed_with_no_extents(self):
"""Test diff restore allowed when no data in destination volume.
Detail conditions:
1. backup base is diff-format
2. restore point exists
3. destination volume is an rbd
4. source and destination volumes are different
5. destination volume has no data on it
"""
backup_base = 'backup.base'
restore_point = 'backup.snap.1'
rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
args_vols_different = [backup_base, self.backup, self.alt_volume,
rbd_io, self.mock_rados]
with mock.patch.object(self.service, '_rbd_image_exists') as \
mock_rbd_image_exists:
mock_rbd_image_exists.return_value = (True, backup_base)
with mock.patch.object(self.service, '_get_restore_point') as \
mock_get_restore_point:
mock_get_restore_point.return_value = restore_point
with mock.patch.object(self.service, '_file_is_rbd') as \
mock_file_is_rbd:
mock_file_is_rbd.return_value = True
with mock.patch.object(self.service, '_rbd_has_extents') \
as mock_rbd_has_extents:
mock_rbd_has_extents.return_value = False
args = args_vols_different
resp = self.service._diff_restore_allowed(*args)
self.assertEqual((True, restore_point), resp)
self.assertTrue(mock_rbd_image_exists.called)
self.assertTrue(mock_get_restore_point.called)
self.assertTrue(mock_file_is_rbd.called)
self.assertTrue(mock_rbd_has_extents.called)
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
@mock.patch('subprocess.Popen', spec=True)
def test_piped_execute(self, mock_popen, mock_fcntl):
mock_fcntl.return_value = 0
self._setup_mock_popen(mock_popen, ['out', 'err'])
self.service._piped_execute(['foo'], ['bar'])
self.assertEqual(['popen_init', 'popen_init',
'stdout_close', 'communicate'], self.callstack)
@common_mocks
def test_restore_metadata(self):
version = 2
def mock_read(*args):
base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
glance_tag: {'image_name': 'image.glance'},
'version': version})
self.mock_rados.Object.return_value.read.side_effect = mock_read
self.service._restore_metadata(self.backup, self.volume_id)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.assertTrue(self.mock_rados.Object.return_value.read.called)
version = 3
try:
self.service._restore_metadata(self.backup, self.volume_id)
except exception.BackupOperationError as exc:
msg = _("Metadata restore failed due to incompatible version")
self.assertEqual(msg, six.text_type(exc))
else:
# Force a test failure
self.assertFalse(True)
@common_mocks
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
def test_backup_metadata_already_exists(self, mock_meta_backup):
def mock_set(json_meta):
msg = (_("Metadata backup object '%s' already exists") %
("backup.%s.meta" % (self.backup_id)))
raise exception.VolumeMetadataBackupExists(msg)
mock_meta_backup.return_value.set = mock.Mock()
mock_meta_backup.return_value.set.side_effect = mock_set
with mock.patch.object(self.service, 'get_metadata') as \
mock_get_metadata:
mock_get_metadata.return_value = "some.json.metadata"
try:
self.service._backup_metadata(self.backup)
except exception.BackupOperationError as e:
msg = (_("Failed to backup volume metadata - Metadata backup "
"object 'backup.%s.meta' already exists") %
(self.backup_id))
self.assertEqual(msg, six.text_type(e))
else:
# Make the test fail
self.assertFalse(True)
self.assertFalse(mock_meta_backup.set.called)
@common_mocks
def test_backup_metadata_error(self):
"""Ensure that delete_backup() is called if the metadata backup fails.
Also ensure that the exception is propagated to the caller.
"""
with mock.patch.object(self.service, '_backup_metadata') as \
mock_backup_metadata:
mock_backup_metadata.side_effect = exception.BackupOperationError
with mock.patch.object(self.service, '_get_volume_size_gb'):
with mock.patch.object(self.service, '_file_is_rbd',
return_value=False):
with mock.patch.object(self.service, '_full_backup'):
with mock.patch.object(self.service, 'delete_backup') as \
mock_delete:
self.assertRaises(exception.BackupOperationError,
self.service.backup, self.backup,
mock.Mock(),
backup_metadata=True)
self.assertTrue(mock_delete.called)
@common_mocks
def test_restore_invalid_metadata_version(self):
def mock_read(*args):
base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
glance_tag: {'image_name': 'image.glance'},
'version': 3})
self.mock_rados.Object.return_value.read.side_effect = mock_read
with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \
mock_exists:
mock_exists.return_value = True
self.assertRaises(exception.BackupOperationError,
self.service._restore_metadata,
self.backup, self.volume_id)
self.assertTrue(mock_exists.called)
self.assertTrue(self.mock_rados.Object.return_value.read.called)
@ddt.data((None, False),
([{'name': 'test'}], False),
([{'name': 'test'}, {'name': 'fake'}], True))
@ddt.unpack
@common_mocks
def test__snap_exists(self, snapshots, snap_exist):
client = mock.Mock()
thread_dict = {}
with mock.patch.object(self.service.rbd.Image(),
'list_snaps') as snaps:
snaps.return_value = snapshots
def mock_side_effect():
thread_dict['thread'] = threading.current_thread()
return snaps.return_value
snaps.side_effect = mock_side_effect
exist = self.service._snap_exists(None, 'fake', client)
self.assertEqual(snap_exist, exist)
self.assertNotEqual(thread_dict['thread'],
threading.current_thread())
def common_meta_backup_mocks(f):
"""Decorator to set mocks common to all metadata backup tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd):
inst.mock_rados = mock_rados
inst.mock_rbd = mock_rbd
inst.mock_rados.ObjectNotFound = MockObjectNotFoundException
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
class VolumeMetadataBackupTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(VolumeMetadataBackupTestCase, self).setUp()
self.backup_id = str(uuid.uuid4())
self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id)
@common_meta_backup_mocks
def test_name(self):
self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name)
@common_meta_backup_mocks
def test_exists(self):
thread_dict = {}
def mock_side_effect():
thread_dict['thread'] = threading.current_thread()
# True
self.mock_rados.Object.return_value.stat.side_effect = mock_side_effect
self.assertTrue(self.mb.exists)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.mock_rados.Object.return_value.reset_mock()
self.assertNotEqual(thread_dict['thread'], threading.current_thread())
# False
self.mock_rados.Object.return_value.stat.side_effect = (
self.mock_rados.ObjectNotFound)
self.assertFalse(self.mb.exists)
self.assertTrue(self.mock_rados.Object.return_value.stat.called)
self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
@common_meta_backup_mocks
def test_set(self):
obj_data = []
called = []
thread_dict = {}
def mock_read(*args):
called.append('read')
self.assertEqual(1, len(obj_data))
return obj_data[0]
def _mock_write(data):
obj_data.append(data)
called.append('write')
thread_dict['thread'] = threading.current_thread()
self.mb.get = mock.Mock()
self.mb.get.side_effect = mock_read
with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write:
mock_write.side_effect = _mock_write
self.mb.set({'foo': 'bar'})
self.assertEqual({'foo': 'bar'}, self.mb.get())
self.assertTrue(self.mb.get.called)
self.mb._exists = mock.Mock()
self.mb._exists.return_value = True
# use the unmocked set() method.
self.assertRaises(exception.VolumeMetadataBackupExists,
self.mb.set, {'doo': 'dah'})
# check the meta obj state has not changed.
self.assertEqual({'foo': 'bar'}, self.mb.get())
self.assertEqual(['write', 'read', 'read'], called)
self.mb._exists.return_value = False
self.mb.set({'doo': 'dah'})
self.assertNotEqual(thread_dict['thread'],
threading.current_thread())
@common_meta_backup_mocks
def test_get(self):
self.mock_rados.Object.return_value.stat.side_effect = (
self.mock_rados.ObjectNotFound)
self.mock_rados.Object.return_value.read.return_value = 'meta'
self.assertIsNone(self.mb.get())
self.mock_rados.Object.return_value.stat.side_effect = None
self.assertEqual('meta', self.mb.get())
@common_meta_backup_mocks
def test_remove_if_exists(self):
thread_dict = {}
def mock_side_effect():
thread_dict['thread'] = threading.current_thread()
with mock.patch.object(self.mock_rados.Object, 'remove') as \
mock_remove:
mock_remove.side_effect = self.mock_rados.ObjectNotFound
self.mb.remove_if_exists()
self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
self.mock_rados.Object.remove.side_effect = mock_side_effect
self.mb.remove_if_exists()
self.assertEqual([], RAISED_EXCEPTIONS)
self.assertNotEqual(thread_dict['thread'],
threading.current_thread())
| |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2012 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
# Replacement for lazy loading stuff in upstream six. See scipy gh-2764
if PY3:
import builtins
import functools
reduce = functools.reduce
zip = builtins.zip
xrange = builtins.range
else:
import __builtin__
import itertools
builtins = __builtin__
reduce = __builtin__.reduce
zip = itertools.izip
xrange = __builtin__.xrange
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
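# Illustrative sketch: these helpers let the same call work on both Python
# versions, e.g.
#
#     for key, value in iteritems({'a': 1}):
#         ...
#
# dispatches to dict.items() on Python 3 and dict.iteritems() on Python 2.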
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
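# Illustrative sketch (abc.ABCMeta is just a familiar example metaclass):
#
#     import abc
#
#     class MyABC(with_metaclass(abc.ABCMeta, object)):
#         pass
#
# MyABC ends up with abc.ABCMeta as its metaclass on both Python 2 and 3.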
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
import datetime
from django.db import transaction
from django.db.models import Count
from .proxies import OCDCandidacyProxy, OCDPersonProxy
def merge_persons(persons):
"""
Merge items in persons iterable into one Person object, which is returned.
"""
# each person will be merged into this one
keep = persons.pop(0)
if keep.__class__ != OCDPersonProxy:
keep.__class__ = OCDPersonProxy
# loop over all the rest
for i in persons:
if i.__class__ != OCDPersonProxy:
i.__class__ = OCDPersonProxy
merge(keep, i)
keep.refresh_from_db()
dedupe_person_ids(keep)
dedupe_person_candidacies(keep)
keep.refresh_from_db()
# make sure Person name is same as most recent candidate_name
latest_candidate_name = keep.candidacies.latest(
'contest__election__date',
).candidate_name
if keep.name != latest_candidate_name:
# move current Person.name into other_names
keep.add_other_name(keep.name, 'Updated current name in merge')
keep.name = latest_candidate_name
keep.save()
return keep
def dedupe_person_ids(person):
"""
Remove duplicate PersonIdentifier objects linked to person.
"""
filer_ids = person.identifiers.filter(scheme='calaccess_filer_id')
dupe_filer_ids = filer_ids.values("identifier").annotate(
row_count=Count('id'),
).order_by().filter(row_count__gt=1)
for i in dupe_filer_ids.all():
# delete all rows with that filer_id
person.identifiers.filter(identifier=i['identifier']).delete()
# then re-add the one
person.identifiers.create(
scheme='calaccess_filer_id',
identifier=i['identifier'],
)
return person
def dedupe_person_candidacies(person):
"""
Remove duplicate Candidacy objects linked to person.
"""
# first, make groups by contests with more than one candidacy
contest_group_q = person.candidacies.values("contest").annotate(
row_count=Count('id')
).filter(row_count__gt=1)
# loop over each contest group
for group in contest_group_q.all():
cands = person.candidacies.filter(contest=group['contest'])
# preference to "qualified" candidacy (from scrape)
if cands.filter(registration_status='qualified').exists():
cand_to_keep = cands.filter(registration_status='qualified').all()[0]
# or the one with the most recent filed_date
else:
cand_to_keep = cands.latest('filed_date')
cand_to_keep.__class__ = OCDCandidacyProxy
# loop over all the other candidacies in the group
for cand_to_discard in cands.exclude(id=cand_to_keep.id).all():
# assuming there's nothing else to preserve in extras
# besides form501_filing_ids
if 'form501_filing_ids' in cand_to_discard.extras:
for i in cand_to_discard.extras['form501_filing_ids']:
cand_to_keep.link_form501(i)
cand_to_keep.refresh_from_db()
if 'form501_filing_ids' in cand_to_keep.extras:
cand_to_keep.update_from_form501()
cand_to_keep.refresh_from_db()
# keep the candidate_name, if not already somewhere else
if (
cand_to_discard.candidate_name != cand_to_keep.candidate_name
and cand_to_discard.candidate_name != cand_to_keep.person.name
and not cand_to_keep.person.other_names.filter(
name=cand_to_discard.candidate_name
).exists()
):
person.other_names.create(
name=cand_to_discard.candidate_name,
note='From merge of %s candidacies' % cand_to_keep.contest
)
cand_to_keep.refresh_from_db()
# keep the candidacy sources
if cand_to_discard.sources.exists():
for source in cand_to_discard.sources.all():
if not cand_to_keep.sources.filter(url=source.url).exists():
cand_to_keep.sources.create(
url=source.url,
note=source.note,
)
cand_to_keep.refresh_from_db()
# keep earliest filed_date
if cand_to_keep.filed_date and cand_to_discard.filed_date:
if cand_to_keep.filed_date > cand_to_discard.filed_date:
cand_to_keep.filed_date = cand_to_discard.filed_date
elif cand_to_discard.filed_date:
cand_to_keep.filed_date = cand_to_discard.filed_date
# keep is_incumbent if True
if not cand_to_keep.is_incumbent and cand_to_discard.is_incumbent:
cand_to_keep.is_incumbent = cand_to_discard.is_incumbent
# assuming not trying to merge candidacies with different parties
if not cand_to_keep.party and cand_to_discard.party:
cand_to_keep.party = cand_to_discard.party
cand_to_keep.save()
cand_to_discard.delete()
return person
def compute_diff(obj1, obj2):
"""
Given two objects compute a list of differences.
Each diff dict has the following keys:
field - name of the field
new - the new value for the field
one - value of the field in obj1
two - value of the field in obj2
diff - none|one|two|new
list - true if field is a list of related objects
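    Example entry (illustrative only; the field name and values are hypothetical)::
        {'field': 'name', 'new': 'Jane Doe', 'one': 'Jane Doe',
         'two': 'J. Doe', 'diff': 'one', 'list': False}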
"""
comparison = []
fields = obj1._meta.get_fields()
exclude = ('created_at', 'updated_at', 'id', 'locked_fields')
if obj1 == obj2:
raise ValueError('cannot merge object with itself')
for field in fields:
if field.name in exclude:
continue
elif not field.is_relation:
piece_one = getattr(obj1, field.name)
piece_two = getattr(obj2, field.name)
            if piece_one == piece_two:
                diff = 'none'
                new = piece_one
            elif piece_one:
                diff = 'one'
                new = piece_one
            elif piece_two:
                diff = 'two'
                new = piece_two
            else:
                # both values are empty but unequal (e.g. '' vs. None);
                # fall back to obj1's value so diff/new are always defined
                diff = 'one'
                new = piece_one
comparison.append({
'field': field.name,
'new': new,
'one': getattr(obj1, field.name),
'two': getattr(obj2, field.name),
'diff': diff,
'list': False,
})
else:
related_name = field.get_accessor_name()
piece_one = list(getattr(obj1, related_name).all())
piece_two = list(getattr(obj2, related_name).all())
# TODO: try and deduplicate the lists?
new = piece_one + piece_two
diff = 'none' if piece_one == piece_two else 'one'
if (field.name == 'other_names' and obj1.name != obj2.name):
new.append(field.related_model(name=obj2.name,
note='from merge w/ ' + obj2.id)
)
diff = 'new'
if field.name == 'identifiers':
new.append(field.related_model(identifier=obj2.id))
diff = 'new'
if field.name == 'memberships':
new = _dedupe_memberships(new)
comparison.append({
'field': related_name,
'new': new,
'one': piece_one,
'two': piece_two,
'diff': diff,
'list': True,
})
comparison.append({'field': 'created_at',
'new': min(obj1.created_at, obj2.created_at),
'one': obj1.created_at,
'two': obj2.created_at,
'diff': 'one' if obj1.created_at < obj2.created_at else 'two',
'list': False,
})
comparison.append({'field': 'updated_at',
'new': datetime.datetime.utcnow(),
'one': obj1.updated_at,
'two': obj2.updated_at,
'diff': 'new',
'list': False,
})
# locked fields are any fields that change that aren't M2M relations
# (ending in _set)
new_locked_fields = obj1.locked_fields + obj2.locked_fields + [
c['field'] for c in comparison if c['diff'] != 'none' and not c['field'].endswith('_set')
]
new_locked_fields = set(new_locked_fields) - {'updated_at', 'created_at'}
comparison.append({'field': 'locked_fields',
'new': list(new_locked_fields),
'one': obj1.locked_fields,
                       'two': obj2.locked_fields,
'diff': 'new',
'list': False,
})
return comparison
@transaction.atomic
def apply_diff(obj1, obj2, diff):
for row in diff:
if row['diff'] != 'none':
if row['list']:
                # re-point each related item at obj1, then save it
for item in row['new']:
setattr(item,
getattr(obj1, row['field']).field.name,
obj1)
item.save()
else:
setattr(obj1, row['field'], row['new'])
obj1.save()
count, delete_plan = obj2.delete()
if count > 1:
# shouldn't happen, but let's be sure
raise AssertionError('deletion failed due to related objects left unmerged')
def merge(obj1, obj2):
diff = compute_diff(obj1, obj2)
apply_diff(obj1, obj2, diff)
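# Illustrative usage sketch for merge(). The ids below are hypothetical, a
# configured Django environment is assumed, and ``Person`` stands for the
# opencivicdata Person model (import assumed elsewhere in this project):
#
#     keep = Person.objects.get(id='ocd-person/1111')
#     discard = Person.objects.get(id='ocd-person/2222')
#     merge(keep, discard)   # re-points related objects onto `keep`, then deletes `discard`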
def _dedupe_memberships(memberships):
deduped = []
mset = set()
for membership in memberships:
mkey = (membership.organization_id,
membership.label,
membership.end_date,
membership.post_id)
if mkey not in mset:
deduped.append(membership)
mset.add(mkey)
else:
membership.delete()
return deduped
"""String-based code generation utilities."""
import re
import cypy
## Code generator
class CG(object):
"""Provides a simple, flexible code generator."""
@cypy.autoinit
def __init__(self, processor=None,
code_builder=cypy.new[list],
convert=str,
fix_indentation=cypy.fix_indentation,
indent_depth=0,
default_indent=4,
fork_arguments=("processor",
"convert",
"fix_indentation",
"default_indent")): pass
## Basic API
def append(self, code):
"""Core API method for appending to the source code stream.
It can take the following as input.
*Strings*
The processor is called if specified. String values from the
            processed stream are added after newlines are elided and indented.
Other values are recursed on.
            Multiple adjacent newlines which straddle appends are elided to
produce a single newline. To insert multiple newlines, they must
be adjacent in the same string passed to append.
*Callables*
Callables taking no arguments are called and their return value
            is recursed on if not ``None`` or ``self``.
Callables taking one argument are called with ``self`` and their
return value is recursed on if not ``None`` or ``self``.
*Iterables*
            The items are recursed on.
*Expressions*
If ``code._CG_expression`` is defined, that value is recursed on.
If ``code._CG_context`` is defined, its value will be appended to
the processor using ``append``, if possible, while recursing.
*Convertables*
See :data:`convert`.
"""
# support one-shot push and pop of dictionaries using operators
pop_next = self._pop_next
if pop_next:
self._pop_next = False
if isinstance(code, str):
# Strings are processed, then indented appropriately
for token in self._process(code):
prev = self.last_string
prev_ends_with_nl = prev is None or prev.endswith('\n')
token_starts_with_nl = token.startswith("\n")
indent_depth = self.indent_depth
if prev_ends_with_nl:
if indent_depth > 0:
self.code_builder.append(self.indent_str)
if token_starts_with_nl:
token = token[1:]
if indent_depth > 0:
token = cypy.re_nonend_newline.sub(
"\n" + self.indent_str, token)
if token != "":
self.code_builder.append(token)
else: self._process_nonstrings(code)
if pop_next:
self.pop_context()
return self
@classmethod
def append_once(cls, code, **kwargs):
"""One-off code generation using append.
If keyword args are provided, initialized using
:meth:`with_id_processor`.
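        Example (illustrative)::
            >>> CG.append_once("x = 1")
            'x = 1'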
"""
if kwargs:
g = cls.with_id_processor()
g._append_context(kwargs)
else:
g = cls()
g.append(code)
return g.code
def _process_nonstrings(self, code):
if code is not None and code is not self:
expr = getattr(code, '_CG_expression', None)
if expr is not None:
# Push the value's context onto stack
context = getattr(code, '_CG_context', None)
if context:
self._append_context(context)
else:
context = None
self.append(expr)
if context:
self.pop_context()
elif cypy.is_callable(code):
code = self._call_callable(code)
if code is not None and code is not self:
self.append(code)
elif cypy.is_iterable(code):
for item in code:
self.append(item)
else:
self.append(self._convert(code))
def extend(self, code):
"""Appends each item in code, which should be iterable.
.. Note:: Since :meth:`append` supports iterables, you can always just
use that. This method is here to complete the list analogy.
"""
self.append(code)
return self
def lines(self, code):
"""Fixes indentation for multiline strings before appending."""
if isinstance(code, str):
fix_indentation = self.fix_indentation
if fix_indentation:
code = fix_indentation(code)
return self.append(code)
else:
return self.append(code)
@classmethod
def lines_once(cls, code, **kwargs):
"""One-off code generation using :meth:`lines`.
If keyword args are provided, initialized using
:meth:`with_id_processor`.
"""
if kwargs:
g = cls.with_id_processor()
g._append_context(kwargs)
else:
g = cls()
g.lines(code)
return g.code
fix_indentation = cypy.fix_indentation
"""Called by lines to fix indentation before passing on to append.
Defaults to :func:`cypy.fix_indentation`. Called with the code.
Should return a left-justified string.
"""
processor = None
"""Called with strings to append. Should return an iterator.
If not specified, acts as if it yielded the input without modification.
"""
@classmethod
def with_id_processor(cls, *args):
"""Returns a new instance with the processor set to a new instance
        of :class:`IdentifierProcessor`, initialized so that non-string
        substitutions are processed appropriately.
"""
ip = IdentifierProcessor()
g = cls(ip, *args)
ip.nonstring_processor = lambda _, substitution: \
g._process_nonstrings(substitution)
return g
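    # Illustrative sketch (hypothetical names), mirroring how
    # Node._make_code_generator below wires this constructor up:
    #
    #     g = CG.with_id_processor()
    #     g.processor.recursive = True
    #     g.append_context(x='count')
    #     g << "x += 1\n"    # identifiers found in the pushed context are substituted
    #     g.pop_context()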
convert = str
"""Called on values not matching other categories in append().
The result is recursed on if not None. Defaults to str().
"""
code_builder = [ ]
"""The list of string appends so far."""
@property
def last_string(self):
"""The last entry in code_builder, or ``None`` if none so far."""
        cb = self.code_builder
        return cb[-1] if cb else None
@property
def code(self):
"""Returns the concatenated list of strings upon access."""
return "".join(self.code_builder)
## Indentation management
indent_depth = 0
"""The current indentation depth, in spaces."""
default_indent = 4
"""The number of spaces to indent by default. Defaults to 4."""
@property
def indent_str(self):
"""The current indent string. ``" "*self.indent_depth``"""
return " "*self.indent_depth
@staticmethod
def tab(g):
"""A token which will increase the indent depth by the default amount
if added to the stream."""
g.indent_depth += g.default_indent
@staticmethod
def untab(g):
"""A token which will decrease the indent depth by the default amount
if added to the stream"""
g.indent_depth -= g.default_indent
## Internals
def _process(self, expr):
processor = self.processor
if processor is not None:
for processed in processor(expr):
yield processed
else:
yield expr
def _call_callable(self, code):
nargs = cypy.fn_minimum_argcount(code)
if nargs == 0:
rv = code()
elif nargs == 1:
rv = code(self)
else:
raise ValueError("Callable must take either 0 or 1 arguments without defaults.")
return rv
def _convert(self, code):
if self.convert is not None:
return self.convert(code)
## Operators API
def __lshift__(self, right):
self.append(right)
return self
def __rrshift__(self, left):
self.append(left)
return self
def __rshift__(self, right):
self.lines(right)
return self
def __rlshift__(self, left):
self.lines(left)
return self
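    # Illustrative sketch of the operator API: ``g << x`` and ``x >> g`` go
    # through :meth:`append`, while ``g >> x`` and ``x << g`` go through
    # :meth:`lines`. Chaining works because every operator returns the
    # generator itself.
    #
    #     g = CG()
    #     g << "def f():\n" << CG.tab << "return 1\n" << CG.untab
    #     g.code   # -> "def f():\n    return 1\n"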
## Processor Contexts API [Experimental]
def __call__(self, **context):
if context:
self.append_context(**context)
self._pop_next = True
return self
_pop_next = False
def append_context(self, **context):
"""Appends the provided keyword arguments to the processor."""
self._append_context(context)
def _append_context(self, context):
processor = getattr(self, 'processor', None)
if processor is not None:
append_context = getattr(processor, 'append_context', None)
if append_context is None:
append_context = getattr(processor, 'append', None)
if append_context is not None:
append_context(context)
def pop_context(self):
"""Pops the last set of keyword arguments provided to the processor."""
processor = getattr(self, 'processor', None)
if processor is not None:
pop_context = getattr(processor, 'pop_context', None)
if pop_context is None:
pop_context = getattr(processor, 'pop', None)
if pop_context is not None:
return pop_context()
if self._pop_next:
self._pop_next = False
## Processing identifiers
class IdentifierProcessor(object):
"""Breaks a string into identifiers and replaces them using the substitutor.
>>> ip = IdentifierProcessor(stack_lookup(stack({'chocolate': 'pudding'})))
    >>> ''.join(ip('who wants chocolate?'))
    'who wants pudding?'
"""
@cypy.autoinit
def __init__(self,
substitutor=cypy.new[cypy.stack_lookup],
recursive=False,
exclude=cypy.new[set],
nonstring_processor=None):
pass
substitutor = None
"""The map to look up identifiers in for their substitution.
KeyErrors result in the identifier being left unchanged.
"""
recursive = True
"""If True, string substitutions are recursed on until they yield unchanged
values.
The exclude stack prevents some classes of runaway recursion by excluding
the identifier being recursed on.
"""
exclude = None
"""The set of identifiers to exclude from replacement.
Used internally if recursive is True.
"""
nonstring_processor = None
"""The function to call on non-strings that are returned by the substitutor.
Called with ``token, substitution``.
If ``None``, the non-string is yielded directly.
If the return value is not None, it is yielded.
The result is not recursed on.
"""
def __call__(self, code):
recursive = self.recursive
re_non_identifier = self.re_non_identifier
nonstring_processor = self.nonstring_processor
for token in re_non_identifier.split(code):
if token in self.exclude or re_non_identifier.match(token):
# don't molest tokens in exclude list and non-identifiers
# (this could be more efficient if we tested the first value
# then alternated instead of checking each one)
yield token
else:
if token == "": continue
if recursive: cypy.include(self.exclude, token)
try:
substitution = self.substitutor[token]
except KeyError:
yield token
else:
if recursive and isinstance(substitution, str):
for final in self(substitution):
yield final
elif substitution is not None:
if nonstring_processor is not None:
substitution = nonstring_processor(token,
substitution)
if substitution is not None: yield substitution
else:
yield substitution
else:
yield token
if recursive: cypy.remove_once(self.exclude, token)
re_non_identifier = re.compile(r"(\W+)")
"""A regular expression to match non-identifiers for use with re.split."""
def append(self, dict):
"""``append`` the provided ``dict`` to the ``substitutor``.
        Raises an AttributeError if the substitutor does not support
        ``append``. The default substitutor, a :class:`cypy.stack_lookup`,
        does.
"""
self.substitutor.append(dict)
def pop(self):
"""``pop`` the last item appended to the ``substitutor``."""
return self.substitutor.pop()
class Partitioner(object):
"""Use the code generator to create an efficient if/else if/else block
against a numeric variable.
Removes redundant checks against previous blocks or the min and max that
a simple strategy might include.
Example:
>>> g = CG()
>>> p = Partitioner(g.append, "rank", min_start=0, max_end=1000)
>>> p.next(start=10, end=100, code='almost()')
>>> p.next(move=100, code='not_bad()')
>>> p.next(start=500, code='you_need_practice()')
>>> print g.code
if 10 <= rank < 100:
almost()
elif rank < 200:
not_bad()
elif rank >= 500:
you_need_practice()
Notes:
- Ranges must be ordered (i.e. start must be >= the previous end.)
- Ranges are checked Python style, that is against [start, end).
"""
@cypy.autoinit
def __init__(self, callback=None, var_name=None, min_start=0,
max_end=cypy.inf):
pass
callback = None
"""Callback for appends."""
var_name = None
"""Comparison variable name."""
min_start = None
"""The lower bound."""
max_end = None
"""The upper bound. Use cypy.inf for none."""
_index = 0
_prev_end = cypy.NotInitialized
## Language-related
if_start = "\nif "
"""The string which begins an if statement. Defaults to ``'\\nif '``."""
else_if_start = "elif "
"""The string which begins an else if statement. Defaults to ``'elif '``."""
guard_end = (":\n", CG.tab)
"""The string which ends a guard. Defaults to ``(":\\n", CG.tab)``."""
else_start = ("else:\n", CG.tab)
"""The string which begins an else statement. Defaults to ``('else:\\n', CG.tab)``."""
block_end = (CG.untab, "\n")
"""The string which ends a block. Defaults to ``(CG.untab, '\\n')``."""
@staticmethod
def lt(left, right):
"""Generates the expression to compare two operands using ``<``."""
return "%s < %s" % (left, right)
@staticmethod
def gte(left, right):
"""Generates the expression to compare two operands using ``>=``."""
return "%s >= %s" % (left, right)
@staticmethod
def range(left, middle, right):
"""Generates the expression to compare a value in a range.
Defaults to ``left <= middle < right``.
"""
return "%s <= %s < %s" % (left, middle, right)
def _guard(self, code, conditional, condition):
self.callback((conditional, condition, self.guard_end,
code,
self.block_end))
def _else(self, code):
self.callback((self.else_start, code, self.block_end))
def next(self, move=None, start=None, end=None, code=None):
if start is None: start = self._prev_end
if move is None:
if end is None: end = self.max_end
else:
assert end is None
end = start + move
assert start >= self.min_start
assert end <= self.max_end
index = self._index
if index == 0:
cur_conditional = self.if_start
else:
cur_conditional = self.else_if_start
if self._prev_end is cypy.NotInitialized:
self._prev_end = self.min_start
if start == self._prev_end:
# No first check needed
if end == self.max_end:
# No last check needed
if index == 0:
# No block needed
self.callback(code)
else:
# Final else
self._else(code)
else:
# Only last check needed
self._guard(code, cur_conditional,
self.lt(self.var_name, str(end)))
else:
if end == self.max_end:
# Only first check needed
condition = self.gte(self.var_name, str(start))
else:
# Both checks needed
condition = self.range(str(start), self.var_name, str(end))
self._guard(code, cur_conditional, condition)
self._index += 1
self._prev_end = end
## Generation trees
class Node(cypy.BidirectionalTree, cypy.Naming):
"""Base node class for a generation tree.
    .. Note:: These aren't abstract syntax trees. They describe components of
a program. If you want software engineering mumbo-jumbo, try to
read about `Frame Technology
<http://en.wikipedia.org/wiki/Frame_Technology_%28software_engineering%29>`_.
"""
@cypy.autoinit
def __init__(self, parent, basename, **kwargs): #@UnusedVariable
self.__appended_context = { }
def _make_code_generator(self):
# Simple class which maintains a list of strings to concatenate at the
# end to produce code and provides some useful helper functions so
# indentation isn't messed up and such.
g = CG.with_id_processor()
g.processor.recursive = True
return g
def trigger_cg_hook(self, name, g, header=None, *args, **kwargs):
if header is not None:
g << "# ___" + header + "___\n"
appended_context = False
if g not in self.__appended_context: # check so there aren't extra copies wasting time
g._append_context(self._name_lookup)
self.__appended_context[g] = appended_context = True
method = getattr(self, name, None)
if cypy.is_callable(method):
method(g, *args, **kwargs)
try:
children = self.children
except AttributeError:
return
else:
if children is not None:
for child in children:
method = getattr(child, 'trigger_cg_hook', None)
if cypy.is_callable(method):
method(name, g, None, *args, **kwargs)
else:
method = getattr(child, name, None)
if cypy.is_callable(method):
method(*args, **kwargs)
if appended_context:
g.pop_context()
del self.__appended_context[g]
def trigger_staged_cg_hook(self, name, g, *args, **kwargs):
"""Calls a three-staged hook:
1. ``"pre_"+name``
2. ``"in_"+name``
3. ``"post_"+name``
"""
print_hooks = self._print_hooks
# TODO: document name lookup business
# TODO: refactor this context stuff, its confusing
hook_name = "pre_" + name
printed_name = hook_name if print_hooks else None
self.trigger_cg_hook(hook_name, g, printed_name, *args, **kwargs) # TODO: avoid copies
hook_name = "in_" + name
printed_name = hook_name if print_hooks else None
self.trigger_cg_hook(hook_name, g, printed_name, *args, **kwargs)
hook_name = "post_" + name
printed_name = hook_name if print_hooks else None
self.trigger_cg_hook(hook_name, g, printed_name, *args, **kwargs)
_print_hooks = False
@cypy.lazy(property)
def _name_lookup(self):
return cypy.attr_lookup(self)
class StandaloneCode(Node):
"""A node for inserting standalone code at a particular named hook."""
@cypy.autoinit
def __init__(self, parent, hook=None, code=None, basename="StandaloneCode"):
pass
def __getattr__(self, name):
if name != 'hook' and name == self.hook:
return self.__insert_code
raise AttributeError(name)
def __insert_code(self, g):
self.code >> g
class Listener(Node):
def __init__(self, parent, hook, callback, basename="Listener"):
Node.__init__(self, parent, basename)
self.hook = hook
self.callback = callback
setattr(self, hook, callback)
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module extends the standard logging functionality with the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
_PY26 = sys.version_info[0:2] == (2, 6)
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import local
# NOTE(flaper87): Pls, remove when graduating this module
# from the incubator.
from cinder.openstack.common.strutils import mask_password # noqa
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN']
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
def isEnabledFor(self, level):
if _PY26:
# This method was added in python 2.7 (and it does the exact
# same logic, so we need to do the exact same logic so that
# python 2.6 has this capability as well).
return self.logger.isEnabledFor(level)
else:
return super(BaseLoggerAdapter, self).isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
if six.PY3:
# In Python 3, the code fails because the 'manager' attribute
# cannot be found when using a LoggerAdapter as the
# underlying logger. Work around this issue.
self._logger.manager = self._logger.logger.manager
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
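# Illustrative sketch (handler wiring below is hypothetical and not part of
# setup()): emitting JSON-formatted records to stdout.
#
#     handler = logging.StreamHandler(sys.stdout)
#     handler.setFormatter(JSONFormatter())
#     logging.getLogger('myapp').addHandler(handler)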
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except (moves.configparser.Error, KeyError) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
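# Illustrative usage sketch (the context object and instance uuid are
# hypothetical; the module path follows this tree's conventions):
#
#     from cinder.openstack.common import log as logging
#     logging.setup('cinder')
#     LOG = logging.getLogger(__name__)
#     LOG.info('booting instance', context=request_context,
#              instance={'uuid': '00000000-0000-0000-0000-000000000000'})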
def set_defaults(logging_context_format_string=None,
default_log_levels=None):
    # Just in case the caller is not setting the
    # default_log_levels. This is insurance because
    # we introduced the default_log_levels parameter
    # later in a backwards-incompatible change
if default_log_levels is not None:
cfg.set_defaults(
log_opts,
default_log_levels=default_log_levels)
if logging_context_format_string is not None:
cfg.set_defaults(
log_opts,
logging_context_format_string=logging_context_format_string)
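# Illustrative sketch: consuming projects can override the option defaults
# before setup() runs (the extra logger level shown is hypothetical):
#
#     set_defaults(default_log_levels=DEFAULT_LOG_LEVELS + ['stevedore=WARN'])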
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
        valid_facilities = list(facility_names.keys())
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
try:
handler = importutils.import_object(
"cinder.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
except ImportError:
handler = importutils.import_object(
"oslo.messaging.notify.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
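# Illustrative sketch: handing a file-like object to code that only knows how
# to write() to a stream (the logger name is hypothetical):
#
#     LOG = getLogger('myapp.wsgi')
#     stream = WritableLogger(LOG, logging.DEBUG)
#     stream.write('request handled\n')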
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations(object):
"""SubnetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Subnet"]
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2018_01_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SubnetListResult"]
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
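# Illustrative usage sketch (not part of the generated client). Names such as
# "my-rg", "my-vnet" and "my-subnet" are placeholders, and the example assumes
# the azure-identity and multi-api azure-mgmt-network packages are installed
# with working credentials:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network.v2018_01_01 import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#
#   # begin_create_or_update returns an LROPoller; result() blocks until the
#   # ARM operation completes, polling every polling_interval seconds unless
#   # a Retry-After header says otherwise.
#   poller = client.subnets.begin_create_or_update(
#       "my-rg", "my-vnet", "my-subnet",
#       {"address_prefix": "10.0.0.0/24"},
#   )
#   subnet = poller.result()
#
#   # list() returns an ItemPaged iterator that transparently follows
#   # next_link pages.
#   for s in client.subnets.list("my-rg", "my-vnet"):
#       print(s.name)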
| |
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import datetime
from django.utils import tree
from django.db import connection
from django.db.models.fields import Field
from django.db.models.query_utils import QueryWrapper
from datastructures import EmptyResultSet, FullResultSet
# Connection types
AND = 'AND'
OR = 'OR'
class EmptyShortCircuit(Exception):
"""
Internal exception used to indicate that a "matches nothing" node should be
added to the where-clause.
"""
pass
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
The children in this tree are usually either Q-like objects or 4-tuples
of ((table_alias, column_name, db_type), lookup_type, value_annotation,
params), as stored by add(). However, a child could also be any class
with as_sql() and relabel_aliases() methods.
"""
default = AND
def add(self, data, connector):
"""
Add a node to the where-tree. If the data is a list or tuple, it is
expected to be of the form (constraint_obj, lookup_type, value), where
constraint_obj is typically a Constraint instance; the data is then
slightly munged before being stored (to avoid storing any reference to
field objects). Otherwise, the 'data' is stored unchanged and can be
anything with an 'as_sql()' method.
"""
if not isinstance(data, (list, tuple)):
super(WhereNode, self).add(data, connector)
return
obj, lookup_type, value = data
if hasattr(obj, "process"):
try:
obj, params = obj.process(lookup_type, value)
except EmptyShortCircuit:
# There are situations where we want to short-circuit any
# comparisons and make sure that nothing is returned. One
# example is when checking for a NULL pk value, or the
# equivalent.
super(WhereNode, self).add(NothingNode(), connector)
return
else:
params = Field().get_db_prep_lookup(lookup_type, value)
# The "annotation" parameter is used to pass auxiliary information
# about the value(s) to the query construction. Specifically, datetime
# and empty values need special handling. Other types could be used
# here in the future (using Python types is suggested for consistency).
if isinstance(value, datetime.datetime):
annotation = datetime.datetime
elif hasattr(value, 'value_annotation'):
annotation = value.value_annotation
else:
annotation = bool(value)
super(WhereNode, self).add((obj, lookup_type, annotation, params),
connector)
def as_sql(self, qn=None):
"""
Returns the SQL version of the where clause and the parameters to be
substituted in. Returns None, [] if this node is empty.
If 'qn' is provided, it is used to quote names in the generated SQL;
otherwise the connection's quote_name function is used.
"""
if not qn:
qn = connection.ops.quote_name
if not self.children:
return None, []
result = []
result_params = []
empty = True
for child in self.children:
try:
if hasattr(child, 'as_sql'):
sql, params = child.as_sql(qn=qn)
else:
# A leaf node in the tree.
sql, params = self.make_atom(child, qn)
except EmptyResultSet:
if self.connector == AND and not self.negated:
# We can bail out early in this particular case (only).
raise
elif self.negated:
empty = False
continue
except FullResultSet:
if self.connector == OR:
if self.negated:
empty = True
break
# We match everything. No need for any constraints.
return '', []
if self.negated:
empty = True
continue
empty = False
if sql:
result.append(sql)
result_params.extend(params)
if empty:
raise EmptyResultSet
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
sql_string = 'NOT (%s)' % sql_string
elif len(self.children) != 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def make_atom(self, child, qn):
"""
Turn a child tuple ((table_alias, column_name, db_type), lookup_type,
value_annot, params) into valid SQL.
Returns the string for the SQL fragment and the parameters to use for
it.
"""
lvalue, lookup_type, value_annot, params = child
if isinstance(lvalue, tuple):
# A direct database column lookup.
field_sql = self.sql_for_columns(lvalue, qn)
else:
# A smart object with an as_sql() method.
field_sql = lvalue.as_sql(quote_func=qn)
if value_annot is datetime.datetime:
cast_sql = connection.ops.datetime_cast_sql()
else:
cast_sql = '%s'
if hasattr(params, 'as_sql'):
extra, params = params.as_sql(qn)
cast_sql = ''
else:
extra = ''
if lookup_type in connection.operators:
format = "%s %%s %s" % (connection.ops.lookup_cast(lookup_type),
extra)
return (format % (field_sql,
connection.operators[lookup_type] % cast_sql), params)
if lookup_type == 'in':
if not value_annot:
raise EmptyResultSet
if extra:
return ('%s IN %s' % (field_sql, extra), params)
return ('%s IN (%s)' % (field_sql, ', '.join(['%s'] * len(params))),
params)
elif lookup_type in ('range', 'year'):
return ('%s BETWEEN %%s and %%s' % field_sql, params)
elif lookup_type in ('month', 'day'):
return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type,
field_sql), params)
elif lookup_type == 'isnull':
return ('%s IS %sNULL' % (field_sql,
(not value_annot and 'NOT ' or '')), ())
elif lookup_type == 'search':
return (connection.ops.fulltext_search_sql(field_sql), params)
elif lookup_type in ('regex', 'iregex'):
return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
raise TypeError('Invalid lookup_type: %r' % lookup_type)
def sql_for_columns(self, data, qn):
"""
Returns the SQL fragment used for the left-hand side of a column
constraint (for example, the "T1.foo" portion in the clause
"WHERE ... T1.foo = 6").
"""
table_alias, name, db_type = data
if table_alias:
lhs = '%s.%s' % (qn(table_alias), qn(name))
else:
lhs = qn(name)
return connection.ops.field_cast_sql(db_type) % lhs
def relabel_aliases(self, change_map, node=None):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
if not node:
node = self
for pos, child in enumerate(node.children):
if hasattr(child, 'relabel_aliases'):
child.relabel_aliases(change_map)
elif isinstance(child, tree.Node):
self.relabel_aliases(change_map, child)
else:
elt = list(child[0])
if elt[0] in change_map:
elt[0] = change_map[elt[0]]
node.children[pos] = (tuple(elt),) + child[1:]
# Check if the query value also requires relabelling
if hasattr(child[3], 'relabel_aliases'):
child[3].relabel_aliases(change_map)
class EverythingNode(object):
"""
A node that matches everything.
"""
def as_sql(self, qn=None):
raise FullResultSet
def relabel_aliases(self, change_map, node=None):
return
class NothingNode(object):
"""
A node that matches nothing.
"""
def as_sql(self, qn=None):
raise EmptyResultSet
def relabel_aliases(self, change_map, node=None):
return
class Constraint(object):
"""
An object that can be passed to WhereNode.add() and knows how to
pre-process itself prior to including in the WhereNode.
"""
def __init__(self, alias, col, field):
self.alias, self.col, self.field = alias, col, field
def process(self, lookup_type, value):
"""
Returns a tuple of data suitable for inclusion in a WhereNode
instance.
"""
# Because of circular imports, we need to import this here.
from django.db.models.base import ObjectDoesNotExist
try:
if self.field:
params = self.field.get_db_prep_lookup(lookup_type, value)
db_type = self.field.db_type()
else:
# This branch is used at times when we add a comparison to NULL
# (we don't really want to waste time looking up the associated
# field object at the calling location).
params = Field().get_db_prep_lookup(lookup_type, value)
db_type = None
except ObjectDoesNotExist:
raise EmptyShortCircuit
return (self.alias, self.col, db_type), params
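# Illustrative sketch of how Constraint and WhereNode fit together in this
# internal API (requires a configured Django connection; the table and column
# names are made up and exact quoting/spacing depends on the backend):
#
#   where = WhereNode()
#   where.add((Constraint('T1', 'name', None), 'exact', 'bob'), AND)
#   sql, params = where.as_sql()
#   # sql is roughly '"T1"."name" = %s' and params is ['bob']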
| |
#############################################################################
#
# Voronoi diagram calculator/ Delaunay triangulator
# Translated to Python by Bill Simons
# September, 2005
#
# Additional changes by Carson Farmer added November 2010
#
# Calculate Delaunay triangulation or the Voronoi polygons for a set of
# 2D input points.
#
# Derived from code bearing the following notice:
#
# The author of this software is Steven Fortune. Copyright (c) 1994 by AT&T
# Bell Laboratories.
# Permission to use, copy, modify, and distribute this software for any
# purpose without fee is hereby granted, provided that this entire notice
# is included in all copies of any software which is or includes a copy
# or modification of this software and in all copies of the supporting
# documentation for such software.
# THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR AT&T MAKE ANY
# REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
# OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
#
# Comments were incorporated from Shane O'Sullivan's translation of the
# original code into C++ (http://mapviewer.skynet.ie/voronoi.html)
#
# Steve Fortune's homepage: http://netlib.bell-labs.com/cm/cs/who/sjf/index.html
#
#############################################################################
"""
Where the actual behind-the-scenes work takes place. This is
the original Voronoi diagram calculator/Delaunay triangulator
by Bill Simons and Carson Farmer.
"""
##"""
##voronoi - compute Voronoi diagram or Delaunay triangulation
##
##voronoi [-t -p -d] [filename]
##
##Voronoi reads from filename (or standard input if no filename given) for a set
##of points in the plane and writes either the Voronoi diagram or the Delaunay
##triangulation to the standard output. Each input line should consist of two
##real numbers, separated by white space.
##
##If option -t is present, the Delaunay triangulation is produced.
##Each output line is a triple i j k, which are the indices of the three points
##in a Delaunay triangle. Points are numbered starting at 0.
##
##If option -t is not present, the Voronoi diagram is produced.
##There are four output record types.
##
##s a b indicates that an input point at coordinates a b was seen.
##l a b c indicates a line with equation ax + by = c.
##v a b indicates a vertex at a b.
##e l v1 v2 indicates a Voronoi segment which is a subsegment of line number l
## with endpoints numbered v1 and v2. If v1 or v2 is -1, the line
## extends to infinity.
##
##Other options include:
##
##d Print debugging info
##
##p Produce output suitable for input to plot (1), rather than the forms
## described above.
##
##On unsorted data uniformly distributed in the unit square, voronoi uses about
##20n+140 bytes of storage.
##
##AUTHOR
##Steve J. Fortune (1987) A Sweepline Algorithm for Voronoi Diagrams,
##Algorithmica 2, 153-174.
##"""
#############################################################################
#
# For programmatic use two functions are available:
#
# computeVoronoiDiagram(points)
#
# Takes a list of point objects (which must have x and y fields).
# Returns a 3-tuple of:
#
# (1) a list of 2-tuples, which are the x,y coordinates of the
# Voronoi diagram vertices
# (2) a list of 3-tuples (a,b,c) which are the equations of the
# lines in the Voronoi diagram: a*x + b*y = c
# (3) a list of 3-tuples, (l, v1, v2) representing edges of the
# Voronoi diagram. l is the index of the line, v1 and v2 are
# the indices of the vertices at the end of the edge. If
# v1 or v2 is -1, the line extends to infinity.
#
# computeDelaunayTriangulation(points):
#
# Takes a list of point objects (which must have x and y fields).
# Returns a list of 3-tuples: the indices of the points that form a
# Delaunay triangle.
#
#############################################################################
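# Illustrative example of the programmatic interface described above; the
# Point class is just a stand-in for any object with x and y fields:
#
#   class Point(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#
#   pts = [Point(0, 0), Point(4, 0), Point(0, 4), Point(4, 4), Point(2, 2)]
#   triangles = computeDelaunayTriangulation(pts)   # e.g. [(0, 1, 4), ...]
#   vertices, edges, polygons = computeVoronoiDiagram(pts)
#
# Note that computeVoronoiDiagram() as defined below returns
# (vertices, edges, polygons); the list of line equations is kept on the
# Context object but is not part of the returned tuple.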
import math
import sys
import getopt
TOLERANCE = 1e-9
BIG_FLOAT = 1e38
#------------------------------------------------------------------
class Context(object):
def __init__(self):
self.doPrint = 0
self.debug = 0
self.plot = 0
self.triangulate = False
self.vertices = [] # list of vertex 2-tuples: (x,y)
self.lines = [] # equation of line 3-tuple (a b c), for the equation of the line a*x+b*y = c
self.edges = [] # edge 3-tuple: (line index, vertex 1 index, vertex 2 index) if either vertex index is -1, the edge extends to infiinity
self.triangles = [] # 3-tuple of vertex indices
self.polygons = {} # a dict of site:[edges] pairs
def circle(self,x,y,rad):
pass
def clip_line(self,edge):
pass
def line(self,x0,y0,x1,y1):
pass
def outSite(self,s):
if(self.debug):
print("site (%d) at %f %f" % (s.sitenum, s.x, s.y))
elif(self.triangulate):
pass
elif(self.plot):
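# NOTE: 'cradius' below is never defined in this translation (it was a
# plotting global in the original C code), so plot mode would raise a
# NameError here; circle() is a no-op stub in any case.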
self.circle (s.x, s.y, cradius)
elif(self.doPrint):
print("s %f %f" % (s.x, s.y))
def outVertex(self,s):
self.vertices.append((s.x,s.y))
if(self.debug):
print("vertex(%d) at %f %f" % (s.sitenum, s.x, s.y))
elif(self.triangulate):
pass
elif(self.doPrint and not self.plot):
print("v %f %f" % (s.x,s.y))
def outTriple(self,s1,s2,s3):
self.triangles.append((s1.sitenum, s2.sitenum, s3.sitenum))
if(self.debug):
print("circle through left=%d right=%d bottom=%d" % (s1.sitenum, s2.sitenum, s3.sitenum))
elif(self.triangulate and self.doPrint and not self.plot):
print("%d %d %d" % (s1.sitenum, s2.sitenum, s3.sitenum))
def outBisector(self,edge):
self.lines.append((edge.a, edge.b, edge.c))
if(self.debug):
print("line(%d) %gx+%gy=%g, bisecting %d %d" % (edge.edgenum, edge.a, edge.b, edge.c, edge.reg[0].sitenum, edge.reg[1].sitenum))
elif(self.triangulate):
if(self.plot):
self.line(edge.reg[0].x, edge.reg[0].y, edge.reg[1].x, edge.reg[1].y)
elif(self.doPrint and not self.plot):
print("l %f %f %f" % (edge.a, edge.b, edge.c))
def outEdge(self,edge):
sitenumL = -1
if edge.ep[Edge.LE] is not None:
sitenumL = edge.ep[Edge.LE].sitenum
sitenumR = -1
if edge.ep[Edge.RE] is not None:
sitenumR = edge.ep[Edge.RE].sitenum
if edge.reg[0].sitenum not in self.polygons:
self.polygons[edge.reg[0].sitenum] = []
if edge.reg[1].sitenum not in self.polygons:
self.polygons[edge.reg[1].sitenum] = []
self.polygons[edge.reg[0].sitenum].append((edge.edgenum,sitenumL,sitenumR))
self.polygons[edge.reg[1].sitenum].append((edge.edgenum,sitenumL,sitenumR))
self.edges.append((edge.edgenum,sitenumL,sitenumR))
if(not self.triangulate):
if self.plot:
self.clip_line(edge)
elif(self.doPrint):
print("e %d" % edge.edgenum)
print(" %d " % sitenumL)
print("%d" % sitenumR)
#------------------------------------------------------------------
def voronoi(siteList,context):
try:
edgeList = EdgeList(siteList.xmin,siteList.xmax,len(siteList))
priorityQ = PriorityQueue(siteList.ymin,siteList.ymax,len(siteList))
siteIter = siteList.iterator()
bottomsite = next(siteIter, None)
context.outSite(bottomsite)
newsite = next(siteIter, None)
minpt = Site(-BIG_FLOAT,-BIG_FLOAT)
while True:
if not priorityQ.isEmpty():
minpt = priorityQ.getMinPt()
if (newsite and (priorityQ.isEmpty() or newsite < minpt)):
# newsite is smallest - this is a site event
context.outSite(newsite)
# get first Halfedge to the LEFT and RIGHT of the new site
lbnd = edgeList.leftbnd(newsite)
rbnd = lbnd.right
# if this halfedge has no edge, bot = bottom site (whatever that is)
# create a new edge that bisects
bot = lbnd.rightreg(bottomsite)
edge = Edge.bisect(bot,newsite)
context.outBisector(edge)
# create a new Halfedge, setting its pm field to 0 and insert
# this new bisector edge between the left and right vectors in
# a linked list
bisector = Halfedge(edge,Edge.LE)
edgeList.insert(lbnd,bisector)
# if the new bisector intersects with the left edge, remove
# the left edge's vertex, and put in the new one
p = lbnd.intersect(bisector)
if p is not None:
priorityQ.delete(lbnd)
priorityQ.insert(lbnd,p,newsite.distance(p))
# create a new Halfedge, setting its pm field to 1
# insert the new Halfedge to the right of the original bisector
lbnd = bisector
bisector = Halfedge(edge,Edge.RE)
edgeList.insert(lbnd,bisector)
# if this new bisector intersects with the right Halfedge
p = bisector.intersect(rbnd)
if p is not None:
# push the Halfedge into the ordered linked list of vertices
priorityQ.insert(bisector,p,newsite.distance(p))
newsite = next(siteIter, None)
elif not priorityQ.isEmpty():
# intersection is smallest - this is a vector (circle) event
# pop the Halfedge with the lowest vector off the ordered list of
# vectors. Get the Halfedge to the left and right of the above HE
# and also the Halfedge to the right of the right HE
lbnd = priorityQ.popMinHalfedge()
llbnd = lbnd.left
rbnd = lbnd.right
rrbnd = rbnd.right
# get the Site to the left of the left HE and to the right of
# the right HE which it bisects
bot = lbnd.leftreg(bottomsite)
top = rbnd.rightreg(bottomsite)
# output the triple of sites, stating that a circle goes through them
mid = lbnd.rightreg(bottomsite)
context.outTriple(bot,top,mid)
# get the vertex that caused this event and set the vertex number
# couldn't do this earlier since we didn't know when it would be processed
v = lbnd.vertex
siteList.setSiteNumber(v)
context.outVertex(v)
# set the endpoint of the left and right Halfedge to be this vector
if lbnd.edge.setEndpoint(lbnd.pm,v):
context.outEdge(lbnd.edge)
if rbnd.edge.setEndpoint(rbnd.pm,v):
context.outEdge(rbnd.edge)
# delete the lowest HE, remove all vertex events to do with the
# right HE and delete the right HE
edgeList.delete(lbnd)
priorityQ.delete(rbnd)
edgeList.delete(rbnd)
# if the site to the left of the event is higher than the Site
# to the right of it, then swap them and set 'pm' to RIGHT
pm = Edge.LE
if bot.y > top.y:
bot,top = top,bot
pm = Edge.RE
# Create an Edge (or line) that is between the two Sites. This
# creates the formula of the line, and assigns a line number to it
edge = Edge.bisect(bot, top)
context.outBisector(edge)
# create a HE from the edge
bisector = Halfedge(edge, pm)
# insert the new bisector to the right of the left HE
# set one endpoint to the new edge to be the vector point 'v'
# If the site to the left of this bisector is higher than the right
# Site, then this endpoint is put in position 0; otherwise in pos 1
edgeList.insert(llbnd, bisector)
if edge.setEndpoint(Edge.RE - pm, v):
context.outEdge(edge)
# if the left HE and the new bisector intersect, remove the left
# HE's pending circle event and reinsert it with the new
# intersection point
p = llbnd.intersect(bisector)
if p is not None:
priorityQ.delete(llbnd);
priorityQ.insert(llbnd, p, bot.distance(p))
# if the right HE and the new bisector intersect, insert the
# resulting circle event into the priority queue
p = bisector.intersect(rrbnd)
if p is not None:
priorityQ.insert(bisector, p, bot.distance(p))
else:
break
he = edgeList.leftend.right
while he is not edgeList.rightend:
context.outEdge(he.edge)
he = he.right
Edge.EDGE_NUM = 0
except Exception as err:
print("######################################################")
print(str(err))
#------------------------------------------------------------------
def isEqual(a,b,relativeError=TOLERANCE):
# is nearly equal to within the allowed relative error
norm = max(abs(a),abs(b))
return (norm < relativeError) or (abs(a - b) < (relativeError * norm))
#------------------------------------------------------------------
class Site(object):
def __init__(self,x=0.0,y=0.0,sitenum=0):
self.x = x
self.y = y
self.sitenum = sitenum
def dump(self):
print("Site #%d (%g, %g)" % (self.sitenum,self.x,self.y))
def _compare(self,other):
if self.y < other.y:
return -1
elif self.y > other.y:
return 1
elif self.x < other.x:
return -1
elif self.x > other.x:
return 1
else:
return 0
def __lt__(self,other):
return self._compare(other) == -1
def __le__(self,other):
return self._compare(other) in (-1,0)
def __gt__(self,other):
return self._compare(other) == 1
def __ge__(self,other):
return self._compare(other) in (1,0)
def __eq__(self,other):
return self._compare(other) == 0
def __ne__(self,other):
return self._compare(other) != 0
def distance(self,other):
dx = self.x - other.x
dy = self.y - other.y
return math.sqrt(dx*dx + dy*dy)
#------------------------------------------------------------------
class Edge(object):
LE = 0
RE = 1
EDGE_NUM = 0
DELETED = {} # marker value
def __init__(self):
self.a = 0.0
self.b = 0.0
self.c = 0.0
self.ep = [None,None]
self.reg = [None,None]
self.edgenum = 0
def dump(self):
print("(#%d a=%g, b=%g, c=%g)" % (self.edgenum,self.a,self.b,self.c))
print("ep",self.ep)
print("reg",self.reg)
def setEndpoint(self, lrFlag, site):
self.ep[lrFlag] = site
if self.ep[Edge.RE - lrFlag] is None:
return False
return True
@staticmethod
def bisect(s1,s2):
newedge = Edge()
newedge.reg[0] = s1 # store the sites that this edge is bisecting
newedge.reg[1] = s2
# to begin with, there are no endpoints on the bisector - it goes to infinity
# ep[0] and ep[1] are None
# get the difference in x dist between the sites
dx = float(s2.x - s1.x)
dy = float(s2.y - s1.y)
adx = abs(dx) # make sure that the difference in positive
ady = abs(dy)
# get the slope of the line
newedge.c = float(s1.x * dx + s1.y * dy + (dx*dx + dy*dy)*0.5)
if adx > ady :
# set formula of line, with x fixed to 1
newedge.a = 1.0
newedge.b = dy/dx
newedge.c /= dx
else:
# set formula of line, with y fixed to 1
newedge.b = 1.0
newedge.a = dx/dy
newedge.c /= dy
newedge.edgenum = Edge.EDGE_NUM
Edge.EDGE_NUM += 1
return newedge
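# Worked example (illustrative): for s1 = (0, 0) and s2 = (2, 0) we get
# dx = 2, dy = 0 and c = 0 + 0 + (4 + 0) * 0.5 = 2; since adx > ady the
# line is normalised to a = 1, b = 0 / 2 = 0, c = 2 / 2 = 1, i.e. x = 1,
# which is the perpendicular bisector of the segment between the sites.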
#------------------------------------------------------------------
class Halfedge(object):
def __init__(self,edge=None,pm=Edge.LE):
self.left = None # left Halfedge in the edge list
self.right = None # right Halfedge in the edge list
self.qnext = None # priority queue linked list pointer
self.edge = edge # edge list Edge
self.pm = pm
self.vertex = None # Site()
self.ystar = BIG_FLOAT
def dump(self):
print("Halfedge--------------------------")
print("left: ", self.left)
print("right: ", self.right)
print("edge: ", self.edge)
print("pm: ", self.pm)
print("vertex: ")
if self.vertex: self.vertex.dump()
else: print("None")
print("ystar: ", self.ystar)
def _compare(self,other):
if self.ystar > other.ystar:
return 1
elif self.ystar < other.ystar:
return -1
elif self.vertex.x > other.vertex.x:
return 1
elif self.vertex.x < other.vertex.x:
return -1
else:
return 0
def __lt__(self,other):
return self._compare(other) == -1
def __le__(self,other):
return self._compare(other) in (-1,0)
def __gt__(self,other):
return self._compare(other) == 1
def __ge__(self,other):
return self._compare(other) in (1,0)
def __eq__(self,other):
return self._compare(other) == 0
def __ne__(self,other):
return self._compare(other) != 0
def leftreg(self,default):
if not self.edge:
return default
elif self.pm == Edge.LE:
return self.edge.reg[Edge.LE]
else:
return self.edge.reg[Edge.RE]
def rightreg(self,default):
if not self.edge:
return default
elif self.pm == Edge.LE:
return self.edge.reg[Edge.RE]
else:
return self.edge.reg[Edge.LE]
# returns True if p is to right of halfedge self
def isPointRightOf(self,pt):
e = self.edge
topsite = e.reg[1]
right_of_site = pt.x > topsite.x
if(right_of_site and self.pm == Edge.LE):
return True
if(not right_of_site and self.pm == Edge.RE):
return False
if(e.a == 1.0):
dyp = pt.y - topsite.y
dxp = pt.x - topsite.x
fast = 0;
if ((not right_of_site and e.b < 0.0) or (right_of_site and e.b >= 0.0)):
above = dyp >= e.b * dxp
fast = above
else:
above = pt.x + pt.y * e.b > e.c
if(e.b < 0.0):
above = not above
if (not above):
fast = 1
if (not fast):
dxs = topsite.x - (e.reg[0]).x
above = e.b * (dxp*dxp - dyp*dyp) < dxs*dyp*(1.0+2.0*dxp/dxs + e.b*e.b)
if(e.b < 0.0):
above = not above
else: # e.b == 1.0
yl = e.c - e.a * pt.x
t1 = pt.y - yl
t2 = pt.x - topsite.x
t3 = yl - topsite.y
above = t1*t1 > t2*t2 + t3*t3
if(self.pm==Edge.LE):
return above
else:
return not above
#--------------------------
# create a new site where the Halfedges el1 and el2 intersect
def intersect(self,other):
e1 = self.edge
e2 = other.edge
if (e1 is None) or (e2 is None):
return None
# if the two edges bisect the same parent return None
if e1.reg[1] is e2.reg[1]:
return None
d = e1.a * e2.b - e1.b * e2.a
if isEqual(d,0.0):
return None
xint = (e1.c*e2.b - e2.c*e1.b) / d
yint = (e2.c*e1.a - e1.c*e2.a) / d
if(e1.reg[1] < e2.reg[1]):
he = self
e = e1
else:
he = other
e = e2
rightOfSite = xint >= e.reg[1].x
if((rightOfSite and he.pm == Edge.LE) or
(not rightOfSite and he.pm == Edge.RE)):
return None
# create a new site at the point of intersection - this is a new
# vector event waiting to happen
return Site(xint,yint)
#------------------------------------------------------------------
class EdgeList(object):
def __init__(self,xmin,xmax,nsites):
if xmin > xmax: xmin,xmax = xmax,xmin
self.hashsize = int(2*math.sqrt(nsites+4))
self.xmin = xmin
self.deltax = float(xmax - xmin)
self.hash = [None]*self.hashsize
self.leftend = Halfedge()
self.rightend = Halfedge()
self.leftend.right = self.rightend
self.rightend.left = self.leftend
self.hash[0] = self.leftend
self.hash[-1] = self.rightend
def insert(self,left,he):
he.left = left
he.right = left.right
left.right.left = he
left.right = he
def delete(self,he):
he.left.right = he.right
he.right.left = he.left
he.edge = Edge.DELETED
# Get entry from hash table, pruning any deleted nodes
def gethash(self,b):
if(b < 0 or b >= self.hashsize):
return None
he = self.hash[b]
if he is None or he.edge is not Edge.DELETED:
return he
# Hash table points to deleted half edge. Patch as necessary.
self.hash[b] = None
return None
def leftbnd(self,pt):
# Use hash table to get close to desired halfedge
bucket = int(((pt.x - self.xmin)/self.deltax * self.hashsize))
if(bucket < 0):
bucket =0;
if(bucket >=self.hashsize):
bucket = self.hashsize-1
he = self.gethash(bucket)
if(he is None):
i = 1
while True:
he = self.gethash(bucket-i)
if (he is not None): break;
he = self.gethash(bucket+i)
if (he is not None): break;
i += 1
# Now search linear list of halfedges for the correct one
if (he is self.leftend) or (he is not self.rightend and he.isPointRightOf(pt)):
he = he.right
while he is not self.rightend and he.isPointRightOf(pt):
he = he.right
he = he.left;
else:
he = he.left
while (he is not self.leftend and not he.isPointRightOf(pt)):
he = he.left
# Update hash table and reference counts
if(bucket > 0 and bucket < self.hashsize-1):
self.hash[bucket] = he
return he
#------------------------------------------------------------------
class PriorityQueue(object):
def __init__(self,ymin,ymax,nsites):
self.ymin = ymin
self.deltay = ymax - ymin
self.hashsize = int(4 * math.sqrt(nsites))
self.count = 0
self.minidx = 0
self.hash = []
for i in range(self.hashsize):
self.hash.append(Halfedge())
def __len__(self):
return self.count
def isEmpty(self):
return self.count == 0
def insert(self,he,site,offset):
he.vertex = site
he.ystar = site.y + offset
last = self.hash[self.getBucket(he)]
next = last.qnext
while((next is not None) and he > next):
last = next
next = last.qnext
he.qnext = last.qnext
last.qnext = he
self.count += 1
def delete(self,he):
if (he.vertex is not None):
last = self.hash[self.getBucket(he)]
while last.qnext is not he:
last = last.qnext
last.qnext = he.qnext
self.count -= 1
he.vertex = None
def getBucket(self,he):
bucket = int(((he.ystar - self.ymin) / self.deltay) * self.hashsize)
if bucket < 0: bucket = 0
if bucket >= self.hashsize: bucket = self.hashsize-1
if bucket < self.minidx: self.minidx = bucket
return bucket
def getMinPt(self):
while(self.hash[self.minidx].qnext is None):
self.minidx += 1
he = self.hash[self.minidx].qnext
x = he.vertex.x
y = he.ystar
return Site(x,y)
def popMinHalfedge(self):
curr = self.hash[self.minidx].qnext
self.hash[self.minidx].qnext = curr.qnext
self.count -= 1
return curr
#------------------------------------------------------------------
class SiteList(object):
def __init__(self,pointList):
self.__sites = []
self.__sitenum = 0
self.__xmin = pointList[0].x
self.__ymin = pointList[0].y
self.__xmax = pointList[0].x
self.__ymax = pointList[0].y
for i,pt in enumerate(pointList):
self.__sites.append(Site(pt.x,pt.y,i))
if pt.x < self.__xmin: self.__xmin = pt.x
if pt.y < self.__ymin: self.__ymin = pt.y
if pt.x > self.__xmax: self.__xmax = pt.x
if pt.y > self.__ymax: self.__ymax = pt.y
self.__sites.sort()
def setSiteNumber(self,site):
site.sitenum = self.__sitenum
self.__sitenum += 1
def iterator(self):
for item in self.__sites:
yield item
def __iter__(self):
return self.iterator()
def __len__(self):
return len(self.__sites)
def _getxmin(self): return self.__xmin
def _getymin(self): return self.__ymin
def _getxmax(self): return self.__xmax
def _getymax(self): return self.__ymax
xmin = property(_getxmin)
ymin = property(_getymin)
xmax = property(_getxmax)
ymax = property(_getymax)
#------------------------------------------------------------------
def computeVoronoiDiagram(points):
""" Takes a list of point objects (which must have x and y fields).
Returns a 3-tuple of:
(1) a list of 2-tuples, which are the x,y coordinates of the
Voronoi diagram vertices
(2) a list of 3-tuples (a,b,c) which are the equations of the
lines in the Voronoi diagram: a*x + b*y = c
(3) a list of 3-tuples, (l, v1, v2) representing edges of the
Voronoi diagram. l is the index of the line, v1 and v2 are
the indices of the vertices at the end of the edge. If
v1 or v2 is -1, the line extends to infinity.
"""
siteList = SiteList(points)
context = Context()
voronoi(siteList,context)
return (context.vertices, context.edges, context.polygons)
#------------------------------------------------------------------
def computeDelaunayTriangulation(points):
""" Takes a list of point objects (which must have x and y fields).
Returns a list of 3-tuples: the indices of the points that form a
Delaunay triangle.
"""
siteList = SiteList(points)
context = Context()
context.triangulate = True
voronoi(siteList,context)
return context.triangles
#-----------------------------------------------------------------------------
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Ferretcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class FerretcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
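# Worked example (illustrative): bytereverse(0x12345678) == 0x78563412, i.e.
# the four bytes of a 32-bit word are reversed. bufreverse() applies that
# swap to every 4-byte word of a buffer, while wordreverse() reverses the
# order of the 4-byte words themselves; together they perform the byte-swap
# and dword-swap used on the getwork data and hashes below.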
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = FerretcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
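# Example CONFIG-FILE for the parser above (illustrative values; lines are
# key=value pairs and lines starting with '#' are ignored):
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=someuser
#   rpcpass=somepassword
#   threads=2
#   hashmeter=1
#   scantime=30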
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TagKey.status'
db.add_column(
'sentry_filterkey',
'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'TagKey.status'
db.delete_column('sentry_filterkey', 'status')
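# Illustrative usage (assuming South is installed and configured): running
# `python manage.py migrate sentry` applies this migration via forwards();
# migrating back to the previous migration in the series calls backwards()
# and drops the column again.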
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'audit_actors'",
'to': "orm['sentry.User']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.file': {
'Meta': {
'unique_together': "(('name', 'checksum'),)",
'object_name': 'File'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'type':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| |
#!/usr/bin/env python
import os, uuid, urllib, re, sys, datetime, webbrowser
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from pymongo import MongoClient
from bson.objectid import ObjectId
mongo_client = None
class NCBI:
def __init__(self):
self._eutils_base_url = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
def get_entry(self, entry_id):
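# Fetches the full GenBank record (rettype=gbwithparts) for the given nucleotide id
# from the NCBI E-utilities efetch endpoint and returns it as a string.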
response = urllib.urlopen("%sefetch.fcgi?db=nucleotide&id=%s&rettype=gbwithparts"%(self._eutils_base_url, entry_id))
content = str(response.read())
response.close()
return content
def parse_entry(self, entry_content):
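# Parses a GenBank flat-file record: extracts the ACCESSION id, the ORGANISM name,
# each feature with its location (plain ranges, complement(...), join(...), order(...)
# and combinations, deriving intron features from joined CDS segments), the qualifiers
# attached to each feature, and the genomic sequence that follows ORIGIN.
# Returns a (organism, sequence, features) tuple.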
pieces_of_seq=[]
start_of_sequence = False
accession = None
feature_type = None
qualifer_type = None
qualifier_content = None
qualifiers = []
genomic_strand = '+'
genomic_positions = None
features = []
organism = None
inOrganism = False
lineage = ""
location = None
lines = entry_content.strip().split('\n')
if not lines[-1].strip() == '//':
raise Exception("Uncomplete file")
for line in lines:
tokens = re.split('\s+', line)
if line.startswith('ACCESSION'):
accession = re.split('\s+', line)[1]
elif line.strip().startswith('ORGANISM'):
organism = line.strip().split('ORGANISM')[1].strip()
inOrganism = True
elif line.strip().startswith('REFERENCE'):
inOrganism = False
elif line.startswith('ORIGIN'): #the genomic sequence
start_of_sequence = True
#we store the last feature parsed before the sequence section begins
if feature_type and not feature_type == "source" and not feature_type == "intron":
if location.startswith("complement(join("):#a joined location on the Crick strand
genomic_strand = '-'
ends = location.split('complement(join(')[1][:-2].replace("join(", "").replace(')','').replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
if feature_type == 'CDS':
for i in range(1, len(ends)-2, 2):
intron = {
'type': 'intron',
'genomicPositions': [ends[i]+1, ends[i+1]-1],
'genomicStrand': genomic_strand,
}
features.append(intron)
elif location.startswith("join(complement("):#a joined location on the Crick strand
genomic_strand = '-'
ends = location.split('join(complement(')[1][:-2].replace("complement(", "").replace(')', '').replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
if feature_type == 'CDS':
for i in range(1, len(ends)-2, 2):
intron = {
'type': 'intron',
'genomicPositions': [ends[i]+1, ends[i+1]-1],
'genomicStrand': genomic_strand,
}
features.append(intron)
elif location.startswith("complement(order("):
genomic_strand = '-'
ends = location.split('complement(order(')[1][:-2].replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
elif location.startswith("order("):
ends = location.split('order(')[1][:-1].replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
elif location.startswith("complement("): #a location on the Crick strand
genomic_strand = '-'
ends = location.split('complement(')[1][:-1].split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
elif location.startswith("join("): #a joined location
ends = location.split('join(')[1][:-1].replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
if feature_type == 'CDS':
for i in range(1, len(ends)-2, 2):
intron = {
'type': 'intron',
'genomicPositions': [ends[i]+1, ends[i+1]-1],
'genomicStrand': genomic_strand,
}
features.append(intron)
else: #a regular location
ends = location.split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
feature = {
'type': feature_type,
'genomicPositions': genomic_positions,
'genomicStrand': genomic_strand,
}
if qualifer_type and qualifier_content:
if qualifer_type == 'translation':
qualifier_content = qualifier_content.replace(" ","")
qualifiers.append({
"type": qualifer_type,
"content": qualifier_content
})
for qualifier in qualifiers:
feature[qualifier['type']] = qualifier['content']
features.append(feature)
elif len(tokens) == 3 and re.findall('\.\.>?[0-9]+', tokens[2]):
#new feature
#we store the previous one (if any)
if feature_type and not feature_type == "source" and not feature_type == "intron":
if location.startswith("complement(join("):#a joined location on the Crick strand
genomic_strand = '-'
ends = location.split('complement(join(')[1][:-2].replace("join(", "").replace(')','').replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
if feature_type == 'CDS':
for i in range(1, len(ends)-2, 2):
intron = {
'type': 'intron',
'genomicPositions': [ends[i]+1, ends[i+1]-1],
'genomicStrand': genomic_strand,
}
features.append(intron)
elif location.startswith("join(complement("):#a joined location on the Crick strand
genomic_strand = '-'
ends = location.split('join(complement(')[1][:-2].replace("complement(", "").replace(')', '').replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
if feature_type == 'CDS':
for i in range(1, len(ends)-2, 2):
intron = {
'type': 'intron',
'genomicPositions': [ends[i]+1, ends[i+1]-1],
'genomicStrand': genomic_strand,
}
features.append(intron)
elif location.startswith("complement(order("):
genomic_strand = '-'
ends = location.split('complement(order(')[1][:-2].replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
elif location.startswith("order("):
ends = location.split('order(')[1][:-1].replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
elif location.startswith("complement("): #a location on the Crick strand
genomic_strand = '-'
ends = location.split('complement(')[1][:-1].split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
elif location.startswith("join("): #a joined location
ends = location.split('join(')[1][:-1].replace(',','..').split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
if feature_type == 'CDS':
for i in range(1, len(ends)-2, 2):
intron = {
'type': 'intron',
'genomicPositions': [ends[i]+1, ends[i+1]-1],
'genomicStrand': genomic_strand,
}
features.append(intron)
else: #a regular location
ends = location.split('..')
ends = map(lambda end: int(end.replace('>','').replace('<','')), ends)
genomic_positions = [min(ends), max(ends)]
feature = {
'type': feature_type,
'genomicPositions': genomic_positions,
'genomicStrand': genomic_strand,
}
if qualifer_type and qualifier_content:
if qualifer_type == 'translation':
qualifier_content = qualifier_content.replace(" ","")
qualifiers.append({
"type": qualifer_type,
"content": qualifier_content
})
for qualifier in qualifiers:
feature[qualifier['type']] = qualifier['content']
features.append(feature)
feature_type = None
genomic_strand = '+'
genomic_positions = None
qualifer_type = None
qualifier_content = None
qualifiers = []
feature_type = tokens[1].strip()
location = tokens[2].strip()
elif not qualifer_type and not qualifier_content and len(tokens) == 2 and re.findall('\.\.',tokens[1]): #still the content of the current location
location += tokens[1].strip()
elif re.findall('^\s+/.+=', line): # a new qualifier /bla_bla=
if qualifer_type and qualifier_content:
if qualifer_type == 'translation':
qualifier_content = qualifier_content.replace(" ","")
qualifiers.append({
"type": qualifer_type,
"content": qualifier_content
})
qualifer_type = line.strip()[1:].split('=')[0].strip()[0:]
qualifier_content = line.strip()[1:].split('=')[1].strip().replace('"','')
elif re.findall('^\s+/.+', line): # a qualifier like /manual => ignore
pass
elif not start_of_sequence and qualifer_type and qualifier_content : #still the content of the current qualifier
qualifier_content += " "+line.strip().replace('"','')
elif line.startswith('//'): #end of the genomic sequence
start_of_sequence = False
elif start_of_sequence:
pieces_of_seq.append(''.join(re.split('\s+',line.strip())[1:]).upper())
elif inOrganism:
lineage += " "+line.strip()
return organism, ''.join(pieces_of_seq), features
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
class WebSocketHandler(tornado.websocket.WebSocketHandler):
clients = {}
def open(self, *args):
self.id = uuid.uuid4()
self.clients[self.id] = {'id':self.id}
print "New client connected"
def on_message(self, message):
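# Messages are JSON objects dispatched on their 'header' field:
# 'get available projects', 'create project', 'add data', 'remove project',
# 'get all annotations', 'get genome', 'get annotations per genome' and
# 'genome browser dragged'. Each branch replies with a message whose
# 'header' describes the result.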
import json
message = json.loads(message)
if message['header'] == 'get available projects':
databases_names = mongo_client.database_names()
answer = {'header': 'got available projects'}
answer['projects'] = databases_names
self.write_message(answer, binary = False)
elif message['header'] == 'create project':
project_name = message['name'].replace(' ','_')
ncbi = NCBI()
organism, sequence, features = ncbi.parse_entry(ncbi.get_entry(message['ncbi_id']))
self.clients[self.id]['db'] = mongo_client[project_name]
genome_description = {
'_id': str(ObjectId()),
'name': message['ncbi_id'],
'sequence': sequence,
'source': 'db:ncbi:%s'%message['ncbi_id'],
'organism': organism
}
self.clients[self.id]['db']['genomes'].insert(genome_description)
annotations = []
for feature in features:
annotation = {
'_id': str(ObjectId()),
'class': feature['type'],
'source': 'db:ncbi:%s'%message['ncbi_id'],
'organism': organism,
'genomicPositions': feature['genomicPositions'],
'genomicStrand': feature['genomicStrand'],
'genome': "%s@genomes"%genome_description['_id'],
'genomeName': message['ncbi_id'],
}
for key in feature:
if not key in ['type', 'genomicPositions', 'genomicStrand'] and feature.get(key,None):
annotation[key] = feature[key]
annotations.append(annotation)
self.clients[self.id]['db']['annotations'].insert(annotation)
answer = {
'header': 'project created',
'project': project_name,
}
answer['annotations'] = annotations
self.write_message(answer, binary = False)
elif message['header'] == 'add data':
ncbi = NCBI()
organism, sequence, features = ncbi.parse_entry(ncbi.get_entry(message['ncbi_id']))
self.clients[self.id]['db'] = mongo_client[message['project']]
genome_description = {
'_id': str(ObjectId()),
'name': message['ncbi_id'],
'sequence': sequence,
'source': 'db:ncbi:%s'%message['ncbi_id'],
'organism': organism
}
self.clients[self.id]['db']['genomes'].insert(genome_description)
annotations = []
for feature in features:
annotation = {
'_id': str(ObjectId()),
'class': feature['type'],
'source': 'db:ncbi:%s'%message['ncbi_id'],
'organism': organism,
'genomicPositions': feature['genomicPositions'],
'genomicStrand': feature['genomicStrand'],
'genome': "%s@genomes"%genome_description['_id'],
'genomeName': message['ncbi_id'],
}
for key in feature:
if not key in ['type', 'genomicPositions', 'genomicStrand'] and feature.get(key,None):
annotation[key] = feature[key]
annotations.append(annotation)
self.clients[self.id]['db']['annotations'].insert(annotation)
answer = {
'header': 'data added'
}
answer['annotations'] = annotations
self.write_message(answer, binary = False)
elif message['header'] == 'remove project':
mongo_client.drop_database(message['project'])
answer = {
'header': 'project removed'
}
self.write_message(answer, binary = False)
elif message['header'] == 'get all annotations':
project = message['project']
self.clients[self.id]['db'] = mongo_client[project]
annotations = []
for annotation in self.clients[self.id]['db']['annotations'].find():
annotations.append(annotation)
answer = {'header': 'got all annotations'}
answer['annotations'] = annotations
self.write_message(answer, binary = False)
elif message['header'] == 'get genome':
genome = self.clients[self.id]['db']['genomes'].find_one({'_id': message['genome_id']})
answer = {'header': 'got genome'}
answer['genome'] = genome
self.write_message(answer, binary = False)
elif message['header'] == 'get annotations per genome':
annotations = []
for annotation in self.clients[self.id]['db']['annotations'].find({'genome': message['genome_id']+"@genomes"}):
annotations.append(annotation)
answer = {'header': 'got annotations per genome'}
answer['annotations'] = annotations
answer['center'] = message['center']
answer['annotation_id'] = message['annotation_id']
self.write_message(answer, binary = False)
elif message['header'] == 'genome browser dragged':
current_ids = message['current_ids']
genomic_range = message['genomic_range']
for annotation in self.clients[self.id]['db']['annotations'].find({'genome': message['genome_id']+"@genomes"}):
if not annotation['_id'] in current_ids and annotation['genomicPositions'][0] >= genomic_range[0] and annotation['genomicPositions'][0] <= genomic_range[1] or annotation['genomicPositions'][1] >= genomic_range[0] and annotation['genomicPositions'][1] <= genomic_range[1]:
answer = {'header': 'got new annotation to display'}
answer['annotation'] = annotation
self.write_message(answer, binary = False)
def on_close(self):
self.clients.pop(self.id, None)
print "Client disconnected"
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/', IndexHandler),
(r'/websocket', WebSocketHandler)
]
settings = {
'template_path': 'templates',
'static_path': 'static'
}
tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
mongodb_host = "localhost"
mongodb_port = 27017
if "-mh" in sys.argv:
mongodb_host = sys.argv[sys.argv.index("-mh")+1]
if "-mp" in sys.argv:
mongodb_port = int(sys.argv[sys.argv.index("-mp")+1])
try :
mongo_client = MongoClient(mongodb_host, mongodb_port)
except Exception, e:
print 'Cannot connect to any MongoDB instance hosted at %s:%i'%(mongodb_host, mongodb_port)
print 'Usage: ./server.py [-mh mongodb_host (default: localhost)] [-mp mongodb_port (default: 27017)]'
sys.exit(-1)
tornado.options.parse_command_line()
app = Application()
server = tornado.httpserver.HTTPServer(app)
server.listen(8888)
main_loop = tornado.ioloop.IOLoop.instance()
main_loop.add_timeout(datetime.timedelta(seconds=5), lambda: webbrowser.open("http://localhost:8888"))
main_loop.start()
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from keystoneclient import exceptions
from horizon_lib import messages
from horizon_lib import tables
from openstack_horizon import api
from openstack_horizon.dashboards.identity.domains import constants
LOG = logging.getLogger(__name__)
class UpdateUsersLink(tables.LinkAction):
name = "users"
verbose_name = _("Modify Users")
url = "horizon:identity:domains:update"
classes = ("ajax-modal",)
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:list_roles"),
("identity", "identity:list_role_assignments"))
def get_link_url(self, domain):
step = 'update_user_members'
base_url = reverse(self.url, args=[domain.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class UpdateGroupsLink(tables.LinkAction):
name = "groups"
verbose_name = _("Modify Groups")
url = "horizon:identity:domains:update"
classes = ("ajax-modal",)
icon = "pencil"
def get_link_url(self, domain):
step = 'update_group_members'
base_url = reverse(self.url, args=[domain.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class CreateDomainLink(tables.LinkAction):
name = "create"
verbose_name = _("Create Domain")
url = constants.DOMAINS_CREATE_URL
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (('identity', 'identity:create_domain'),)
def allowed(self, request, domain):
return api.keystone.keystone_can_edit_domain()
class EditDomainLink(tables.LinkAction):
name = "edit"
verbose_name = _("Edit")
url = constants.DOMAINS_UPDATE_URL
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (('identity', 'identity:update_domain'),)
def allowed(self, request, domain):
return api.keystone.keystone_can_edit_domain()
class DeleteDomainsAction(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Domain",
u"Delete Domains",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Domain",
u"Deleted Domains",
count
)
name = "delete"
policy_rules = (('identity', 'identity:delete_domain'),)
def allowed(self, request, datum):
return api.keystone.keystone_can_edit_domain()
def delete(self, request, obj_id):
domain = self.table.get_object_by_id(obj_id)
if domain.enabled:
msg = _('Domain "%s" must be disabled before it can be deleted.') \
% domain.name
messages.error(request, msg)
raise exceptions.ClientException(409, msg)
else:
LOG.info('Deleting domain "%s".' % obj_id)
api.keystone.domain_delete(request, obj_id)
class DomainFilterAction(tables.FilterAction):
def allowed(self, request, datum):
multidomain_support = getattr(settings,
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
False)
return multidomain_support
def filter(self, table, domains, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
def comp(domain):
if q in domain.name.lower():
return True
return False
return filter(comp, domains)
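# SetDomainContext/UnsetDomainContext store or clear the selected domain's id and
# name in the session ('domain_context' / 'domain_context_name') so other identity
# panels can scope their queries to that domain.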
class SetDomainContext(tables.Action):
name = "set_domain_context"
verbose_name = _("Set Domain Context")
url = constants.DOMAINS_INDEX_URL
preempt = True
policy_rules = (('identity', 'admin_required'),)
def allowed(self, request, datum):
multidomain_support = getattr(settings,
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
False)
if not multidomain_support:
return False
ctx = request.session.get("domain_context", None)
if ctx and datum.id == ctx:
return False
return True
def single(self, table, request, obj_id):
if ('domain_context' not in request.session or
request.session['domain_context'] != obj_id):
try:
domain = api.keystone.domain_get(request, obj_id)
request.session['domain_context'] = obj_id
request.session['domain_context_name'] = domain.name
messages.success(request,
_('Domain Context updated to Domain %s.') %
domain.name)
except Exception:
messages.error(request,
_('Unable to set Domain Context.'))
class UnsetDomainContext(tables.Action):
name = "clear_domain_context"
verbose_name = _("Clear Domain Context")
url = constants.DOMAINS_INDEX_URL
preempt = True
requires_input = False
policy_rules = (('identity', 'admin_required'),)
def allowed(self, request, datum):
ctx = request.session.get("domain_context", None)
return ctx is not None
def single(self, table, request, obj_id):
if 'domain_context' in request.session:
request.session.pop("domain_context")
request.session.pop("domain_context_name")
messages.success(request, _('Domain Context cleared.'))
class DomainsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'))
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'))
id = tables.Column('id', verbose_name=_('Domain ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True)
class Meta:
name = "domains"
verbose_name = _("Domains")
row_actions = (SetDomainContext, UpdateUsersLink, UpdateGroupsLink,
EditDomainLink, DeleteDomainsAction)
table_actions = (DomainFilterAction, CreateDomainLink,
DeleteDomainsAction, UnsetDomainContext)
| |
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume
version_added: "2.2"
short_description: Manage storage volumes (standard and thin)
description:
- Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
extends_documentation_fragment:
- netapp.eseries
options:
state:
required: true
description:
- Whether the specified volume should exist or not.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the volume to manage
storage_pool_name:
required: true
description:
- "Required only when requested state is 'present'. The name of the storage pool the volume should exist on."
size_unit:
description:
- The unit used to interpret the size parameter
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
size:
required: true
description:
- "Required only when state = 'present'. The size of the volume in (size_unit)."
segment_size_kb:
description:
- The segment size of the new volume
default: 128
thin_provision:
description:
- Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool).
default: False
choices: ['yes','no','true','false']
thin_volume_repo_size:
description:
- Initial size of the thin volume repository volume (in size_unit)
required: True
thin_volume_max_repo_size:
description:
- Maximum size that the thin volume repository volume will automatically expand to
default: same as size (in size_unit)
ssd_cache_enabled:
description:
- Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
default: None (ignores existing SSD cache setting)
choices: ['yes','no','true','false']
data_assurance_enabled:
description:
- If data assurance should be enabled for the volume
default: false
# TODO: doc thin volume parameters
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
- name: No thin volume
netapp_e_volume:
ssid: "{{ ssid }}"
name: NewThinVolumeByAnsible
state: absent
log_path: /tmp/volume.log
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
when: check_volume
- name: No fat volume
netapp_e_volume:
ssid: "{{ ssid }}"
name: NewVolumeByAnsible
state: absent
log_path: /tmp/volume.log
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
when: check_volume
'''
RETURN = '''
---
msg:
description: State of volume
type: string
returned: always
sample: "Standard volume [workload_vol_1] has been created."
'''
import json
import logging
import time
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils.pycompat24 import get_exception
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def ifilter(predicate, iterable):
# python 2, 3 generic filtering.
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
class NetAppESeriesVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
storage_pool_name=dict(type='str'),
size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
type='str'),
size=dict(type='int'),
segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
ssd_cache_enabled=dict(type='bool'), # no default, leave existing setting alone
data_assurance_enabled=dict(default=False, type='bool'),
thin_provision=dict(default=False, type='bool'),
thin_volume_repo_size=dict(type='int'),
thin_volume_max_repo_size=dict(type='int'),
# TODO: add cache, owning controller support, thin expansion policy, etc
log_path=dict(type='str'),
))
self.module = AnsibleModule(argument_spec=argument_spec,
required_if=[
('state', 'present', ['storage_pool_name', 'size']),
('thin_provision', 'true', ['thin_volume_repo_size'])
],
supports_check_mode=True)
p = self.module.params
log_path = p['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if log_path:
logging.basicConfig(level=logging.DEBUG, filename=log_path)
self.state = p['state']
self.ssid = p['ssid']
self.name = p['name']
self.storage_pool_name = p['storage_pool_name']
self.size_unit = p['size_unit']
self.size = p['size']
self.segment_size_kb = p['segment_size_kb']
self.ssd_cache_enabled = p['ssd_cache_enabled']
self.data_assurance_enabled = p['data_assurance_enabled']
self.thin_provision = p['thin_provision']
self.thin_volume_repo_size = p['thin_volume_repo_size']
self.thin_volume_max_repo_size = p['thin_volume_max_repo_size']
if not self.thin_volume_max_repo_size:
self.thin_volume_max_repo_size = self.size
self.validate_certs = p['validate_certs']
try:
self.api_usr = p['api_username']
self.api_pwd = p['api_password']
self.api_url = p['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username "
"and api_password and api_url to the module.")
def get_volume(self, volume_name):
self.debug('fetching volumes')
# fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
try:
(rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid,
str(err)))
try:
self.debug('fetching thin-volumes')
(rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
volumes.extend(thinvols)
self.debug("searching for volume '%s'" % volume_name)
volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)
if volume_detail:
self.debug('found')
else:
self.debug('not found')
return volume_detail
def get_storage_pool(self, storage_pool_name):
self.debug("fetching storage pools")
# map the storage pool name to its id
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
self.debug("searching for storage pool '%s'" % storage_pool_name)
pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)
if pool_detail:
self.debug('found')
else:
self.debug('not found')
return pool_detail
def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled):
volume_add_req = dict(
name=name,
poolId=pool_id,
sizeUnit=size_unit,
size=size,
segSize=segment_size_kb,
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating volume '%s'" % name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
str(err)))
def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size,
thin_volume_max_repo_size, data_assurance_enabled):
thin_volume_add_req = dict(
name=name,
poolId=pool_id,
sizeUnit=size_unit,
virtualSize=size,
repositorySize=thin_volume_repo_size,
maximumRepositorySize=thin_volume_max_repo_size,
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating thin-volume '%s'" % name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
def delete_volume(self):
# delete the volume
self.debug("deleting volume '%s'" % self.volume_detail['name'])
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
self.volume_detail['id']),
method='DELETE', url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
str(err)))
@property
def volume_resource_name(self):
if self.volume_detail['thinProvisioned']:
return 'thin-volumes'
else:
return 'volumes'
@property
def volume_properties_changed(self):
return self.volume_ssdcache_setting_changed # or with other props here when extended
# TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold
@property
def volume_ssdcache_setting_changed(self):
# None means ignore existing setting
if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']:
self.debug("flash cache setting changed")
return True
def update_volume_properties(self):
update_volume_req = dict()
# conditionally add values so we ignore unspecified props
if self.volume_ssdcache_setting_changed:
update_volume_req['flashCache'] = self.ssd_cache_enabled
self.debug("updating volume properties...")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name,
self.volume_detail['id']),
data=json.dumps(update_volume_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
@property
def volume_needs_expansion(self):
current_size_bytes = int(self.volume_detail['capacity'])
requested_size_bytes = self.size * self._size_unit_map[self.size_unit]
# TODO: check requested/current repo volume size for thin-volumes as well
# TODO: do we need to build any kind of slop factor in here?
return requested_size_bytes > current_size_bytes
def expand_volume(self):
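# Thin volumes are grown through the thin-volumes expand endpoint (newVirtualSize);
# standard volumes use the volumes expand endpoint (expansionSize) and are then
# polled until the reported expand action returns to 'none'.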
is_thin = self.volume_detail['thinProvisioned']
if is_thin:
# TODO: support manual repo expansion as well
self.debug('expanding thin volume')
thin_volume_expand_req = dict(
newVirtualSize=self.size,
sizeUnit=self.size_unit
)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
self.volume_detail[
'id']),
data=json.dumps(thin_volume_expand_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
# TODO: check return code
else:
self.debug('expanding volume')
volume_expand_req = dict(
expansionSize=self.size,
sizeUnit=self.size_unit
)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
self.volume_detail['id']),
data=json.dumps(volume_expand_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
str(err)))
self.debug('polling for completion...')
while True:
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
self.volume_detail[
'id']),
method='GET', url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except Exception:
err = get_exception()
self.module.fail_json(
msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % (
self.name, self.ssid, str(err)))
action = resp['action']
percent_complete = resp['percentComplete']
self.debug('expand action %s, %s complete...' % (action, percent_complete))
if action == 'none':
self.debug('expand complete')
break
else:
time.sleep(5)
def apply(self):
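# Idempotent entry point: look up the volume, decide whether a change is needed
# (create, expand, update properties or delete), skip the actual API calls in
# check mode, and exit with a message describing what was (or would have been) done.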
changed = False
volume_exists = False
msg = None
self.volume_detail = self.get_volume(self.name)
if self.volume_detail:
volume_exists = True
if self.state == 'absent':
self.debug("CHANGED: volume exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# check requested volume size, see if expansion is necessary
if self.volume_needs_expansion:
self.debug(
"CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
self.size_unit,
self.volume_detail[
'capacity']))
changed = True
if self.volume_properties_changed:
self.debug("CHANGED: one or more volume properties have changed")
changed = True
else:
if self.state == 'present':
self.debug("CHANGED: volume does not exist, but requested state is 'present'")
changed = True
if changed:
if self.module.check_mode:
self.debug('skipping changes due to check mode')
else:
if self.state == 'present':
if not volume_exists:
pool_detail = self.get_storage_pool(self.storage_pool_name)
if not pool_detail:
self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
if self.thin_provision and not pool_detail['diskPool']:
self.module.fail_json(
msg='Thin provisioned volumes can only be located on disk pools (not volume groups)')
pool_id = pool_detail['id']
if not self.thin_provision:
self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb,
self.data_assurance_enabled)
msg = "Standard volume [%s] has been created." % (self.name)
else:
self.create_thin_volume(pool_id, self.name, self.size_unit, self.size,
self.thin_volume_repo_size, self.thin_volume_max_repo_size,
self.data_assurance_enabled)
msg = "Thin volume [%s] has been created." % (self.name)
else: # volume exists but differs, modify...
if self.volume_needs_expansion:
self.expand_volume()
msg = "Volume [%s] has been expanded." % (self.name)
# this stuff always needs to run on present (since props can't be set on creation)
if self.volume_properties_changed:
self.update_volume_properties()
msg = "Properties of volume [%s] has been updated." % (self.name)
elif self.state == 'absent':
self.delete_volume()
msg = "Volume [%s] has been deleted." % (self.name)
else:
self.debug("exiting with no changes")
if self.state == 'absent':
msg = "Volume [%s] did not exist." % (self.name)
else:
msg = "Volume [%s] already exists." % (self.name)
self.module.exit_json(msg=msg, changed=changed)
def main():
v = NetAppESeriesVolume()
try:
v.apply()
except Exception:
e = get_exception()
v.debug("Exception in apply(): \n%s" % format_exc(e))
v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))
if __name__ == '__main__':
main()
| |
from __future__ import division
import math
import operator
import os
import numpy
import pyglet
from pyglet.gl import *
import yaml
from edge import *
from const import *
from model import *
from state import *
class Util(object):
'''Utility class with various helpful functions.'''
@staticmethod
def showHelp():
'''Prints command line usage help.'''
helpLines = [
("-h, --help", "Shows this help."),
("-d", "Displays debug information in-game. Use multiple times for more info."),
("-p", "Performance mode. Removes features, adds FPS. Use multiple times for more FPS."),
("--worldtype TYPE", "Choose a world type to generate. Options: {}".format(Const.WORLD_TYPES)),
("--seed STRING", "Specify a world seed to use. Uses system time by default.")
]
for line in helpLines:
print "{:<16}\t{}".format(*line)
@staticmethod
def loadModels(modeltype):
'''Loads flyweight models from yaml files given the model type'''
models = {}
defaultmodel = None
counter = -1
for dict in yaml.load_all(open("{}/{}models.yml".format(Const.RESOURCE_PATH, modeltype), 'r')):
counter += 1
dict['modeltype'] = modeltype
model = Model(dict)
models[dict['type']] = model
# Set the default model
if dict['type'] == 'default':
defaultmodel = model
elif 'defaultmodel' not in dict:
model.set('defaultmodel', defaultmodel)
print "Loaded {} {} models".format(counter, modeltype)
return models
@staticmethod
def circle(x, y, r):
blocks = []
for ix in xrange(int(x-r), int(x+r)):
for iy in xrange(int(y-r), int(y+r)):
if (ix-x)**2 + (iy-y)**2 < r**2:
blocks.append((ix, iy))
return blocks
@staticmethod
def line((x, y), (x2, y2)):
'''
Returns a list of all block coordinates between the two points, inclusive, using Bresenham's line algorithm.
From http://mail.scipy.org/pipermail/scipy-user/2009-September/022602.html
'''
steep = 0
coords = []
dx = abs(x2 - x)
if (x2 - x) > 0: sx = 1
else: sx = -1
dy = abs(y2 - y)
if (y2 - y) > 0: sy = 1
else: sy = -1
if dy > dx:
steep = 1
x,y = y,x
dx,dy = dy,dx
sx,sy = sy,sx
d = (2 * dy) - dx
for i in range(0,dx):
if steep: coords.append((y,x))
else: coords.append((x,y))
while d >= 0:
y = y + sy
d = d - (2 * dx)
x = x + sx
d = d + (2 * dy)
return coords #added by me
@staticmethod
def distancePoint(a, b):
ax, ay = a
bx, by = b
return math.sqrt((bx-ax)**2 + (by-ay)**2)
@staticmethod
def getAdjacentCoords(loc, world=None, multiLayer=False):
'''
Returns all coords directly adjacent to the given coords (i.e. left, right, above, and below).
If a world is specified, it will only return coordinates valid for the world.
This works on any "world" object that has a "isValidCoords" method, including World, WorldGen, WorldLayer, and Chunk.
If multiLayer is enabled, it will also return the coords behind and in front, with the layer in each tuple.
The layer of the input coords must be specified as the third element in the location tuple if using multiLayer.
'''
x, y = loc[:2]
if multiLayer:
l = loc[2]
checkCoords = [(x+1,y,l),(x-1,y,l),(x,y+1,l),(x,y-1,l),(x,y,l+1),(x,y,l-1)]
else:
checkCoords = (x+1,y),(x-1,y),(x,y+1),(x,y-1)
if world is not None:
validCoords = []
for coords in checkCoords:
if world.isValidCoords(coords):
validCoords.append(coords)
else:
validCoords = checkCoords
return validCoords
@staticmethod
def add_tuple(*args):
return tuple(map(sum, zip(*args)))
@staticmethod
def sub_tuple(a, b):
return tuple(map(operator.sub, a, b))
@staticmethod
def mul_tuple(a, b):
return tuple(map(operator.mul, a, b))
@staticmethod
def div_tuple(a, b):
return tuple(map(operator.div, a, b))
@staticmethod
def int_tuple(a):
return tuple(map(int, a))
@staticmethod
def int_floor(a):
return tuple(map(int, map(math.floor, a)))
@staticmethod
def getLineOfSightBlocks((dx, dy), world, loc, l=1, maxblocks=None, maxdistance=None):
'''Returns a list of all coordinates up to and including the first solid block found in 'world' on layer 'l' at 'loc' in the direction of the unit vector '(dx, dy)'. Stops checking if maxdistance or maxblocks are specified and reached. The returned list is ordered from closest to farthest.'''
blocks = []
# Special case: Direction is zero. Return current block.
if dx == 0 and dy == 0:
if world.isSolidAt(loc, l=l):
blocks.append(Util.int_floor(loc))
# Special case: Direction is vertical or horizontal. Add direction vector to location until solid block is found.
elif dx == 0 or dy == 0:
newloc = loc
while True:
blocks.append(Util.int_floor(newloc))
if world.isSolidAt(newloc, l=l) is not False: break # Stop once we hit a solid block or the edge of the world.
if maxblocks is not None and len(blocks) >= maxblocks: break # Stop once we hit the maxblocks limit.
if maxdistance is not None and Util.distancePoint(loc, newloc) >= maxdistance: break # Stop once we hit the maxdistance limit.
newloc = Util.add_tuple(newloc, (dx, dy))
# Otherwise, use Bresenham's line algorithm.
else:
slope = dy / dx
x, y = loc
while True:
blocks.append(Util.int_floor((x, y)))
if world.isSolidAt((x, y), l=l) is not False: break # Stop once we hit a solid block or the edge of the world.
if maxblocks is not None and len(blocks) >= maxblocks: break # Stop once we hit the maxblocks limit.
if maxdistance is not None and Util.distancePoint(loc, (x, y)) >= maxdistance: break # Stop once we hit the maxdistance limit.
x += dx
y = slope * (x - loc[0]) + loc[1]
return blocks
@staticmethod
def getClosestSolidBlock((dx, dy), world, loc, l=1, maxdistance=None):
'''Returns the coordinates of the closest solid block in 'world' on layer 'l' at 'loc' in the direction of the unit vector '(dx, dy)'. Stops checking if maxdistance is specified and reached.'''
blocks = Util.getLineOfSightBlocks((dx, dy), world, loc, l, maxdistance=maxdistance)
if len(blocks) == 0: return None
else: return blocks[-1]
@staticmethod
def getNearbySolidBlocks(entity):
bb = entity.shape.getAABB(entity.body.transform, 0)
blocks = []
for x in xrange(int(bb.lowerBound[0] - 1), int(bb.upperBound[0] + 2)):
for y in xrange(int(bb.lowerBound[1] - 1), int(bb.upperBound[1] + 2)):
if entity.world.isSolidAt((x, y)):
blocks.append(Util.int_floor((x, y)))
return blocks
@staticmethod
def getSurroundingBlocks(coords, r=1):
'''Returns the blocks around (in square formation) the given block coordinates within the given range r, not including the center block.'''
x, y = Util.int_floor(coords)
blocks = []
for bx in xrange(x-r, x+r+1):
for by in xrange(y-r, y+r+1):
blocks.append((bx, by))
blocks.remove(coords)
return blocks
@staticmethod
def addDebugStats(texts):
'''Adds new debug stats to the HUD'''
for text in texts:
number = len(State().debugStats)
label = pyglet.text.Label(text, font_size=14, color=(228, 228, 0, 255), batch=State().batch, group=State().group['debug'])
State().debugStats.append((number, label, text))
@staticmethod
def prepareDrawDebugStats():
'''Updates existing debug stats to prepare them for drawing'''
for (number, label, text) in State().debugStats:
label.begin_update()
label.text = eval(text)
label.y = State().window.height-(number+1)*16
label.end_update()
@staticmethod
def getScreenCenter():
'''Returns the on-screen pixel coordinates to the pixel in the middle of the screen'''
return (State().window.width / 2, State().window.height / 2)
@staticmethod
def blocksToPixels((bx, by)):
'''Returns the on-screen pixel coordinates to the lower left corner pixel of the given block'''
camX, camY = State().cameraPos
px = (bx - camX) * Const.PPB * Const.ZOOM + (State().window.width / 2)
py = (by - camY) * Const.PPB * Const.ZOOM + (State().window.height / 2 + 1)
return (px, py)
@staticmethod
def pixelsToBlocks((px, py)):
'''Returns the world coordinates of the block at the given on-screen pixel coordinates'''
camX, camY = State().cameraPos
bx = math.floor((math.floor(px) - (State().window.width / 2)) / Const.PPB / Const.ZOOM + camX)
by = math.floor((math.floor(py) - (State().window.height / 2)) / Const.PPB / Const.ZOOM + camY)
return (bx, by)
@staticmethod
def blocksToChunks((x, y)):
'''Returns the coordinates of the chunk containing the block at the given coords. Does not guarantee that the chunk exists, just that that block would mathematically be there.'''
return (x//Const.CHUNK_SIZE, y//Const.CHUNK_SIZE)
@staticmethod
def chunksToBlocks((x, y)):
'''Returns the coordinates of the lower-left-most block in the chunk at the given chunk coords.'''
return (x*Const.CHUNK_SIZE, y*Const.CHUNK_SIZE)
@staticmethod
def getInChunkCoords((x, y)):
'''Returns the in-chunk coordinates of the block at the given coords. Does not guarantee that the chunk exists, just that that block would mathematically be there.'''
return (x%Const.CHUNK_SIZE, y%Const.CHUNK_SIZE)
@staticmethod
def getOnscreenBlocks():
'''Returns a list of (x, y) coordinates to all blocks that are onscreen.'''
window = State().window
camX, camY = State().cameraPos
blocksOutHor = window.width / 2 / Const.ZOOM / Const.PPB + 1
blocksOutVert = window.height / 2 / Const.ZOOM / Const.PPB + 1
blocks = []
for y in xrange(int(camY - blocksOutVert), int(camY + blocksOutVert)):
for x in xrange(int(camX - blocksOutHor), int(camX + blocksOutHor)):
blocks.append((x, y))
return blocks
@staticmethod
def isBlockOnScreen(coords):
'''Returns True if the block at the given coordinates is on the screen.'''
return coords in Util.getOnscreenBlocks()
@staticmethod
def isBlockOnScreen2((x, y)):
'''Returns True if the block at the given coordinates is on the screen.'''
window = State().window
camX, camY = State().cameraPos
blocksOutHor = window.width / 2 / Const.ZOOM / Const.PPB + 1
blocksOutVert = window.height / 2 / Const.ZOOM / Const.PPB + 1
xmin = int(camX - blocksOutHor)
xmax = int(camX + blocksOutHor)
ymin = int(camY - blocksOutVert)
ymax = int(camY + blocksOutVert)
return xmin < x and x < xmax and ymin < y and y < ymax
@staticmethod
def getOnscreenChunks(world):
'''Returns a list of (x, y) coordinates to all chunks that are onscreen. Uses world.width and world.height to validate the coords.'''
window = State().window
camX, camY = State().cameraPos
blocksOutHor = window.width / 2 / Const.ZOOM / Const.PPB + 1
blocksOutVert = window.height / 2 / Const.ZOOM / Const.PPB + 1
chunks = set()
xmin = int(max(camX - blocksOutHor, 0))
xmax = int(min(camX + blocksOutHor, world.width))
ymin = int(max(camY - blocksOutVert, 0))
ymax = int(min(camY + blocksOutVert, world.height))
for y in xrange(ymin, ymax+Const.CHUNK_SIZE, Const.CHUNK_SIZE):
for x in xrange(xmin, xmax+Const.CHUNK_SIZE, Const.CHUNK_SIZE):
chunks.add((x//Const.CHUNK_SIZE, y//Const.CHUNK_SIZE))
return chunks
@staticmethod
def isChunkOnScreen(coords):
'''Returns True if the chunk at the given coordinates is on the screen.'''
return coords in Util.getOnscreenChunks(State().world)
@staticmethod
def isChunkOnScreen2((cx, cy)):
'''Returns True if the chunk at the given coordinates is on the screen.'''
window = State().window
camX, camY = State().cameraPos
blocksOutHor = window.width / 2 / Const.ZOOM / Const.PPB + 1 + Const.CHUNK_SIZE
blocksOutVert = window.height / 2 / Const.ZOOM / Const.PPB + 1 + Const.CHUNK_SIZE
xmin = int(camX - blocksOutHor)
xmax = int(camX + blocksOutHor)
ymin = int(camY - blocksOutVert)
ymax = int(camY + blocksOutVert)
cx *= Const.CHUNK_SIZE
cy *= Const.CHUNK_SIZE
return xmin < cx < cxmax if False else (xmin < cx < xmax and ymin < cy < ymax)
@staticmethod
def physics_getBlockCoords(entities):
'''Returns a list of block coordinates to be used for physics calculations based on those nearest to entities.'''
blocks = []
for entity in entities:
#blocks.append(Util.getClosestSolidBlock(entity.body.velocity.normalized(), entity.world, entity.body.position))
#blocks.append(Util.getClosestSolidBlock(Const.DOWN, entity.world, entity.body.position))
blocks.extend(Util.getNearbySolidBlocks(entity))
return blocks
@staticmethod
def physics_getEdgeCoords(entities):
'''Returns a list of edge coords in (vertices, location) format for blocks near the given entities, one for each line segment of each block's hitbox. The 'vertices' variable is a tuple of vertices.'''
coords = Util.physics_getBlockCoords(entities)
edges = []
for coord in coords:
points = State().world.getBlockAt(coord).get('hitbox')
lines = Util.polygonPointsToLines(points)
for line in lines:
edges.append((line, coord))
return edges
@staticmethod
def physics_updateEdgePhysics(newEdgeCoords):
'''Updates the cache of edges to be used for physics calculations given a list of edges in (vertices, location) format to use. EdgePhysics objects are created and deleted as necessary.'''
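# Two-pass sync: first tear down bodies for edges that left the active set,
# then lazily create EdgePhysics bodies for edges that just entered it.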
# Stop simulating edges that are no longer relevant.
for oldEdgeCoord, oldEdgePhysics in State().physics_edgePhysics.items():
if oldEdgeCoord not in newEdgeCoords:
State().space.DestroyBody(oldEdgePhysics.body)
del State().physics_edgePhysics[oldEdgeCoord]
# Create new EdgePhysics objects for edges that are relevant (if they don't already exist).
for newEdgeCoord in newEdgeCoords:
if newEdgeCoord not in State().physics_edgePhysics:
State().physics_edgePhysics[newEdgeCoord] = EdgePhysics(list(newEdgeCoord[0]), newEdgeCoord[1])
@staticmethod
def drawDebugPhysicsBlocks():
'''Highlights all blocks currently being used for physics calculations.'''
coords = State().physics_blockCoords
for coord in coords:
points = [Util.blocksToPixels(point) for point in Util.blockToSquarePoints(coord)]
Util.drawPolygonHighlight(points, Const.COLORS['DEBUG_PHYSICS_BLOCK_HIGHLIGHT'])
@staticmethod
def drawDebugPhysicsBlockHitboxes():
'''Draws the hitboxes of all blocks currently being used for physics calculations.'''
coords = State().physics_blockCoords
for coord in coords:
points = [Util.blocksToPixels(Util.add_tuple(coord, point)) for point in State().world.getBlockAt(coord).get('hitbox')]
Util.drawPolygonOutline(points, Const.COLORS['DEBUG_PHYSICS_BLOCK_HITBOX'])
@staticmethod
def drawDebugTargetBlock():
'''Highlights the currently targeted block.'''
coord = Util.pixelsToBlocks(State().mouseLoc)
points = [Util.blocksToPixels(point) for point in Util.blockToSquarePoints(coord)]
Util.drawPolygonHighlight(points, Const.COLORS['DEBUG_TARGET_BLOCK_HIGHLIGHT'])
@staticmethod
def drawDebugChunkBorders():
'''Draws chunk borders for debugging.'''
for _,chunk in numpy.ndenumerate(State().world.layers[1].chunks):
chunk.drawDebugBorders()
@staticmethod
def drawDebugPhysicsEntityHitboxes():
'''Draws the hitboxes of all entities in the world.'''
allEntities = [State().player]
for entity in allEntities:
entity.drawDebugHitbox()
@staticmethod
def createGLDataList(points, color):
datalist = (('v2f', sum(points, ())), ('c4B', color * len(points)))
return datalist
@staticmethod
def drawPolygonOutline(points, color):
'''Draws the outline of a polygon defined by the given points in pixel coordinates.'''
for (a, b) in Util.polygonPointsToLines(points):
data = Util.createGLDataList((a, b), color)
pyglet.graphics.draw(2, pyglet.gl.GL_LINES, *data)
@staticmethod
def drawPolygonHighlight(points, color):
'''Draws a filled polygon defined by the given points in pixel coordinates.'''
data = Util.createGLDataList(points, color)
pyglet.graphics.draw(len(points), pyglet.gl.GL_POLYGON, *data)
@staticmethod
def blockToSquarePoints((x, y)):
'''Returns a list of points that describe the square at the given coordinates.'''
return [(x, y), (x+1, y), (x+1, y+1), (x, y+1)]
@staticmethod
def polygonPointsToLines(polygon):
'''Converts a list of polygon point tuples to a list of polygon line tuples.'''
lines = []
for i in xrange(len(polygon)):
lines.append((polygon[i-1], polygon[i]))
return lines
| |
import base64
import binascii
import struct
from enum import Enum
from xdrlib import Packer, Unpacker
from . import xdr as stellar_xdr
from .exceptions import (
Ed25519PublicKeyInvalidError,
Ed25519SecretSeedInvalidError,
MuxedEd25519AccountInvalidError,
TypeError,
ValueError,
)
from .type_checked import type_checked
__all__ = ["StrKey"]
class _VersionByte(Enum):
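# Each version byte is chosen so that, after base32 encoding, the strkey starts with a
# fixed letter: the high 5 bits are that letter's base32 index (e.g. 'G' = 6, 6 << 3 = 48 = 0x30).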
ED25519_PUBLIC_KEY = binascii.a2b_hex("30") # G 48 6 << 3
ED25519_SECRET_SEED = binascii.a2b_hex("90") # S 144 18 << 3
PRE_AUTH_TX = binascii.a2b_hex("98") # T 152 19 << 3
SHA256_HASH = binascii.a2b_hex("b8") # X 184 23 << 3
MUXED_ACCOUNT = binascii.a2b_hex("60") # M 96 12 << 3
@type_checked
class StrKey:
"""StrKey is a helper class that allows encoding and decoding strkey."""
@staticmethod
def encode_ed25519_public_key(data: bytes) -> str:
"""Encodes data to encoded ed25519 public key strkey.
:param data: data to encode
:return: encoded ed25519 public key strkey
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
return _encode_check(_VersionByte.ED25519_PUBLIC_KEY, data)
@staticmethod
def decode_ed25519_public_key(data: str) -> bytes:
"""Decodes encoded ed25519 public key strkey to raw data.
:param data: encoded ed25519 public key strkey
:return: raw bytes
:raises:
:exc:`Ed25519PublicKeyInvalidError <stellar_sdk.exceptions.Ed25519PublicKeyInvalidError>`
"""
try:
return _decode_check(_VersionByte.ED25519_PUBLIC_KEY, data)
except Exception:
raise Ed25519PublicKeyInvalidError(f"Invalid Ed25519 Public Key: {data}")
@staticmethod
def is_valid_ed25519_public_key(public_key: str) -> bool:
"""Returns ``True`` if the given `seed` is a valid ed25519 public key strkey.
:param public_key: encoded ed25519 public key strkey
:return: ``True`` if the given key is valid
"""
return _is_valid(_VersionByte.ED25519_PUBLIC_KEY, public_key)
@staticmethod
def encode_ed25519_secret_seed(data: bytes) -> str:
"""Encodes data to encoded ed25519 secret seed strkey.
:param data: data to encode
:return: encoded ed25519 secret seed strkey
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
return _encode_check(_VersionByte.ED25519_SECRET_SEED, data)
@staticmethod
def decode_ed25519_secret_seed(data: str) -> bytes:
"""Decodes encoded ed25519 secret seed strkey to raw data.
:param data: encoded ed25519 secret seed strkey
:return: raw bytes
:raises:
:exc:`Ed25519SecretSeedInvalidError <stellar_sdk.exceptions.Ed25519SecretSeedInvalidError>`
"""
try:
return _decode_check(_VersionByte.ED25519_SECRET_SEED, data)
except Exception:
raise Ed25519SecretSeedInvalidError(f"Invalid Ed25519 Secret Seed: {data}")
@staticmethod
def is_valid_ed25519_secret_seed(seed: str) -> bool:
"""Returns ``True`` if the given `seed` is a valid ed25519 secret seed strkey.
:param seed: encoded ed25519 secret seed strkey
:return: ``True`` if the given key is valid
"""
return _is_valid(_VersionByte.ED25519_SECRET_SEED, seed)
@staticmethod
def encode_pre_auth_tx(data: bytes) -> str:
"""Encodes data to encoded pre auth tx strkey.
:param data: data to encode
:return: encoded pre auth tx strkey
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
return _encode_check(_VersionByte.PRE_AUTH_TX, data)
@staticmethod
def decode_pre_auth_tx(data: str) -> bytes:
"""Decodes encoded pre auth tx strkey to raw data.
:param data: encoded pre auth tx strkey
:return: raw bytes
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
try:
return _decode_check(_VersionByte.PRE_AUTH_TX, data)
except Exception as e:
raise ValueError(f"Invalid Pre Auth Tx Key: {data}") from e
@staticmethod
def is_valid_pre_auth_tx(pre_auth_tx: str) -> bool:
"""Returns ``True`` if the given `pre_auth_tx` is a valid encoded pre auth tx strkey.
:param pre_auth_tx: encoded pre auth tx strkey
:return: ``True`` if the given key is valid
"""
return _is_valid(_VersionByte.PRE_AUTH_TX, pre_auth_tx)
@staticmethod
def encode_sha256_hash(data: bytes) -> str:
"""Encodes data to encoded sha256 hash strkey.
:param data: data to encode
:return: encoded sha256 hash strkey
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
return _encode_check(_VersionByte.SHA256_HASH, data)
@staticmethod
def decode_sha256_hash(data: str) -> bytes:
"""Decodes encoded sha256 hash strkey to raw data.
:param data: encoded sha256 hash strkey
:return: raw bytes
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
try:
return _decode_check(_VersionByte.SHA256_HASH, data)
except Exception as e:
raise ValueError(f"Invalid sha256 Hash Key: {data}") from e
@staticmethod
def is_valid_sha256_hash(sha256_hash: str) -> bool:
"""Returns ``True`` if the given `sha256_hash` is a valid encoded sha256 hash(HashX) strkey.
:param sha256_hash: encoded sha256 hash(HashX) strkey
:return: ``True`` if the given key is valid
"""
return _is_valid(_VersionByte.SHA256_HASH, sha256_hash)
@staticmethod
def encode_muxed_account(data: stellar_xdr.MuxedAccount) -> str:
"""Encodes data to encoded muxed account strkey.
:param data: data to encode
:return: encoded muxed account strkey
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
if data.type == stellar_xdr.CryptoKeyType.KEY_TYPE_ED25519:
assert data.ed25519 is not None
return StrKey.encode_ed25519_public_key(data.ed25519.uint256)
assert data.med25519 is not None
packer = Packer()
data.med25519.ed25519.pack(packer)
data.med25519.id.pack(packer)
return _encode_check(_VersionByte.MUXED_ACCOUNT, packer.get_buffer())
@staticmethod
def decode_muxed_account(data: str) -> stellar_xdr.MuxedAccount:
"""Decodes encoded muxed account strkey to raw data.
:param data: encoded muxed account strkey
:return: raw bytes
:raises:
:exc:`ValueError <stellar_sdk.exceptions.ValueError>`
"""
data_length = len(data)
if data_length == 56:
muxed = stellar_xdr.MuxedAccount(
type=stellar_xdr.CryptoKeyType.KEY_TYPE_ED25519,
ed25519=stellar_xdr.Uint256(StrKey.decode_ed25519_public_key(data)),
)
elif data_length == 69:
# let's optimize it in v3.
try:
xdr_bytes = _decode_check(_VersionByte.MUXED_ACCOUNT, data)
except Exception:
raise MuxedEd25519AccountInvalidError(
"Invalid Muxed Account: {}".format(data)
)
unpacker = Unpacker(xdr_bytes)
ed25519 = stellar_xdr.Uint256.unpack(unpacker)
id = stellar_xdr.Uint64.unpack(unpacker)
med25519 = stellar_xdr.MuxedAccountMed25519(
id=id,
ed25519=ed25519,
)
muxed = stellar_xdr.MuxedAccount(
type=stellar_xdr.CryptoKeyType.KEY_TYPE_MUXED_ED25519, med25519=med25519
)
else:
raise ValueError("Invalid encoded string, this is not a valid account.")
return muxed
@type_checked
def _decode_check(version_byte: _VersionByte, encoded: str) -> bytes:
encoded_data = encoded.encode("ascii")
encoded_data = encoded_data + b"=" * ((4 - len(encoded_data) % 4) % 4)
try:
decoded_data = base64.b32decode(encoded_data)
except binascii.Error:
raise ValueError("Incorrect padding.")
if encoded_data != base64.b32encode(decoded_data): # Is that even possible?
raise ValueError("Invalid encoded bytes.")
version_byte_in_data = decoded_data[0:1]
payload = decoded_data[0:-2]
data = decoded_data[1:-2]
checksum = decoded_data[-2:]
if version_byte.value != version_byte_in_data:
raise ValueError(
f"Invalid version byte. Expected {version_byte.value!r}, got {version_byte_in_data!r}"
)
expected_checksum = _calculate_checksum(payload)
if expected_checksum != checksum:
raise ValueError("Invalid checksum")
return data
@type_checked
def _encode_check(version_byte: _VersionByte, data: bytes) -> str:
payload = version_byte.value + data
crc = _calculate_checksum(payload)
return base64.b32encode(payload + crc).decode("utf-8").rstrip("=")
@type_checked
def _is_valid(version_byte: _VersionByte, encoded: str) -> bool:
if encoded and len(encoded) != 56:
return False
try:
_decode_check(version_byte, encoded)
except (ValueError, TypeError):
return False
return True
@type_checked
def _calculate_checksum(payload: bytes) -> bytes:
# memo note: https://gist.github.com/manran/a8357808ef71415d266dc64f0079f298
# This code calculates CRC16-XModem checksum of payload
checksum = binascii.crc_hqx(payload, 0)
# Ensure that the checksum is in LSB order.
return struct.pack("<H", checksum)
| |
import dis
import unittest
from test.support.bytecode_helper import BytecodeTestCase
def count_instr_recursively(f, opname):
count = 0
for instr in dis.get_instructions(f):
if instr.opname == opname:
count += 1
if hasattr(f, '__code__'):
f = f.__code__
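# Also recurse into nested code objects (inner functions, lambdas, comprehensions)
# reachable through co_consts so their instructions are counted too.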
for c in f.co_consts:
if hasattr(c, 'co_code'):
count += count_instr_recursively(c, opname)
return count
class TestTransforms(BytecodeTestCase):
def check_jump_targets(self, code):
instructions = list(dis.get_instructions(code))
targets = {instr.offset: instr for instr in instructions}
for instr in instructions:
if 'JUMP_' not in instr.opname:
continue
tgt = targets[instr.argval]
# jump to unconditional jump
if tgt.opname in ('JUMP_ABSOLUTE', 'JUMP_FORWARD'):
self.fail(f'{instr.opname} at {instr.offset} '
f'jumps to {tgt.opname} at {tgt.offset}')
# unconditional jump to RETURN_VALUE
if (instr.opname in ('JUMP_ABSOLUTE', 'JUMP_FORWARD') and
tgt.opname == 'RETURN_VALUE'):
self.fail(f'{instr.opname} at {instr.offset} '
f'jumps to {tgt.opname} at {tgt.offset}')
# JUMP_IF_*_OR_POP jump to conditional jump
if '_OR_POP' in instr.opname and 'JUMP_IF_' in tgt.opname:
self.fail(f'{instr.opname} at {instr.offset} '
f'jumps to {tgt.opname} at {tgt.offset}')
def check_lnotab(self, code):
"Check that the lnotab byte offsets are sensible."
code = dis._get_code_object(code)
lnotab = list(dis.findlinestarts(code))
# Don't bother checking if the line info is sensible, because
# most of the line info we can get at comes from lnotab.
min_bytecode = min(t[0] for t in lnotab)
max_bytecode = max(t[0] for t in lnotab)
self.assertGreaterEqual(min_bytecode, 0)
self.assertLess(max_bytecode, len(code.co_code))
# This could conceivably test more (and probably should, as there
# aren't very many tests of lnotab), if peepholer wasn't scheduled
# to be replaced anyway.
def test_unot(self):
# UNARY_NOT POP_JUMP_IF_FALSE --> POP_JUMP_IF_TRUE'
def unot(x):
if not x == 2:
del x
self.assertNotInBytecode(unot, 'UNARY_NOT')
self.assertNotInBytecode(unot, 'POP_JUMP_IF_FALSE')
self.assertInBytecode(unot, 'POP_JUMP_IF_TRUE')
self.check_lnotab(unot)
def test_elim_inversion_of_is_or_in(self):
for line, cmp_op, invert in (
('not a is b', 'IS_OP', 1,),
('not a is not b', 'IS_OP', 0,),
('not a in b', 'CONTAINS_OP', 1,),
('not a not in b', 'CONTAINS_OP', 0,),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, cmp_op, invert)
self.check_lnotab(code)
def test_global_as_constant(self):
# LOAD_GLOBAL None/True/False --> LOAD_CONST None/True/False
def f():
x = None
x = None
return x
def g():
x = True
return x
def h():
x = False
return x
for func, elem in ((f, None), (g, True), (h, False)):
self.assertNotInBytecode(func, 'LOAD_GLOBAL')
self.assertInBytecode(func, 'LOAD_CONST', elem)
self.check_lnotab(func)
def f():
'Adding a docstring made this test fail in Py2.5.0'
return None
self.assertNotInBytecode(f, 'LOAD_GLOBAL')
self.assertInBytecode(f, 'LOAD_CONST', None)
self.check_lnotab(f)
def test_while_one(self):
# Skip over: LOAD_CONST trueconst POP_JUMP_IF_FALSE xx
def f():
while 1:
pass
return list
for elem in ('LOAD_CONST', 'POP_JUMP_IF_FALSE'):
self.assertNotInBytecode(f, elem)
for elem in ('JUMP_ABSOLUTE',):
self.assertInBytecode(f, elem)
self.check_lnotab(f)
def test_pack_unpack(self):
for line, elem in (
('a, = a,', 'LOAD_CONST',),
('a, b = a, b', 'ROT_TWO',),
('a, b, c = a, b, c', 'ROT_THREE',),
):
code = compile(line,'','single')
self.assertInBytecode(code, elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.assertNotInBytecode(code, 'UNPACK_TUPLE')
self.check_lnotab(code)
def test_folding_of_tuples_of_constants(self):
for line, elem in (
('a = 1,2,3', (1, 2, 3)),
('("a","b","c")', ('a', 'b', 'c')),
('a,b,c = 1,2,3', (1, 2, 3)),
('(None, 1, None)', (None, 1, None)),
('((1, 2), 3, 4)', ((1, 2), 3, 4)),
):
code = compile(line,'','single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_TUPLE')
self.check_lnotab(code)
# Long tuples should be folded too.
code = compile(repr(tuple(range(10000))),'','single')
self.assertNotInBytecode(code, 'BUILD_TUPLE')
# One LOAD_CONST for the tuple, one for the None return value
load_consts = [instr for instr in dis.get_instructions(code)
if instr.opname == 'LOAD_CONST']
self.assertEqual(len(load_consts), 2)
self.check_lnotab(code)
# Bug 1053819: Tuple of constants misidentified when presented with:
# . . . opcode_with_arg 100 unary_opcode BUILD_TUPLE 1 . . .
# The following would segfault upon compilation
def crater():
(~[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
],)
self.check_lnotab(crater)
def test_folding_of_lists_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_LIST should be folded to a tuple:
('a in [1,2,3]', (1, 2, 3)),
('a not in ["a","b","c"]', ('a', 'b', 'c')),
('a in [None, 1, None]', (None, 1, None)),
('a not in [(1, 2), 3, 4]', ((1, 2), 3, 4)),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertNotInBytecode(code, 'BUILD_LIST')
self.check_lnotab(code)
def test_folding_of_sets_of_constants(self):
for line, elem in (
# in/not in constants with BUILD_SET should be folded to a frozenset:
('a in {1,2,3}', frozenset({1, 2, 3})),
('a not in {"a","b","c"}', frozenset({'a', 'c', 'b'})),
('a in {None, 1, None}', frozenset({1, None})),
('a not in {(1, 2), 3, 4}', frozenset({(1, 2), 3, 4})),
('a in {1, 2, 3, 3, 2, 1}', frozenset({1, 2, 3})),
):
code = compile(line, '', 'single')
self.assertNotInBytecode(code, 'BUILD_SET')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.check_lnotab(code)
# Ensure that the resulting code actually works:
def f(a):
return a in {1, 2, 3}
def g(a):
return a not in {1, 2, 3}
self.assertTrue(f(3))
self.assertTrue(not f(4))
self.check_lnotab(f)
self.assertTrue(not g(3))
self.assertTrue(g(4))
self.check_lnotab(g)
def test_folding_of_binops_on_constants(self):
for line, elem in (
('a = 2+3+4', 9), # chained fold
('"@"*4', '@@@@'), # check string ops
('a="abc" + "def"', 'abcdef'), # check string ops
('a = 3**4', 81), # binary power
('a = 3*4', 12), # binary multiply
('a = 13//4', 3), # binary floor divide
('a = 14%4', 2), # binary modulo
('a = 2+3', 5), # binary add
('a = 13-4', 9), # binary subtract
('a = (12,13)[1]', 13), # binary subscr
('a = 13 << 2', 52), # binary lshift
('a = 13 >> 2', 3), # binary rshift
('a = 13 & 7', 5), # binary and
('a = 13 ^ 7', 10), # binary xor
('a = 13 | 7', 15), # binary or
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('BINARY_'))
self.check_lnotab(code)
# Verify that unfoldables are skipped
code = compile('a=2+"b"', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 2)
self.assertInBytecode(code, 'LOAD_CONST', 'b')
self.check_lnotab(code)
# Verify that large sequences do not result from folding
code = compile('a="x"*10000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 10000)
self.assertNotIn("x"*10000, code.co_consts)
self.check_lnotab(code)
code = compile('a=1<<1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(1<<1000, code.co_consts)
self.check_lnotab(code)
code = compile('a=2**1000', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 1000)
self.assertNotIn(2**1000, code.co_consts)
self.check_lnotab(code)
def test_binary_subscr_on_unicode(self):
# valid code get optimized
code = compile('"foo"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', 'f')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
self.check_lnotab(code)
code = compile('"\u0061\uffff"[1]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\uffff')
self.assertNotInBytecode(code,'BINARY_SUBSCR')
self.check_lnotab(code)
# With PEP 393, non-BMP char get optimized
code = compile('"\U00012345"[0]', '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', '\U00012345')
self.assertNotInBytecode(code, 'BINARY_SUBSCR')
self.check_lnotab(code)
# invalid code doesn't get optimized
# out of range
code = compile('"fuu"[10]', '', 'single')
self.assertInBytecode(code, 'BINARY_SUBSCR')
self.check_lnotab(code)
def test_folding_of_unaryops_on_constants(self):
for line, elem in (
('-0.5', -0.5), # unary negative
('-0.0', -0.0), # -0.0
('-(1.0-1.0)', -0.0), # -0.0 after folding
('-0', 0), # -0
('~-2', 1), # unary invert
('+1', 1), # unary positive
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.check_lnotab(code)
# Check that -0.0 works after marshaling
def negzero():
return -(1.0-1.0)
for instr in dis.get_instructions(negzero):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.check_lnotab(negzero)
# Verify that unfoldables are skipped
for line, elem, opname in (
('-"abc"', 'abc', 'UNARY_NEGATIVE'),
('~"abc"', 'abc', 'UNARY_INVERT'),
):
code = compile(line, '', 'single')
self.assertInBytecode(code, 'LOAD_CONST', elem)
self.assertInBytecode(code, opname)
self.check_lnotab(code)
def test_elim_extra_return(self):
# RETURN LOAD_CONST None RETURN --> RETURN
def f(x):
return x
self.assertNotInBytecode(f, 'LOAD_CONST', None)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 1)
self.check_lnotab(f)
def test_elim_jump_to_return(self):
# JUMP_FORWARD to RETURN --> RETURN
def f(cond, true_value, false_value):
# Intentionally use two-line expression to test issue37213.
return (true_value if cond
else false_value)
self.check_jump_targets(f)
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertEqual(len(returns), 2)
self.check_lnotab(f)
def test_elim_jump_to_uncond_jump(self):
# POP_JUMP_IF_FALSE to JUMP_FORWARD --> POP_JUMP_IF_FALSE to non-jump
def f():
if a:
# Intentionally use two-line expression to test issue37213.
if (c
or d):
foo()
else:
baz()
self.check_jump_targets(f)
self.check_lnotab(f)
def test_elim_jump_to_uncond_jump2(self):
# POP_JUMP_IF_FALSE to JUMP_ABSOLUTE --> POP_JUMP_IF_FALSE to non-jump
def f():
while a:
# Intentionally use two-line expression to test issue37213.
if (c
or d):
a = foo()
self.check_jump_targets(f)
self.check_lnotab(f)
def test_elim_jump_to_uncond_jump3(self):
# Intentionally use two-line expressions to test issue37213.
# JUMP_IF_FALSE_OR_POP to JUMP_IF_FALSE_OR_POP --> JUMP_IF_FALSE_OR_POP to non-jump
def f(a, b, c):
return ((a and b)
and c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertEqual(count_instr_recursively(f, 'JUMP_IF_FALSE_OR_POP'), 2)
# JUMP_IF_TRUE_OR_POP to JUMP_IF_TRUE_OR_POP --> JUMP_IF_TRUE_OR_POP to non-jump
def f(a, b, c):
return ((a or b)
or c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertEqual(count_instr_recursively(f, 'JUMP_IF_TRUE_OR_POP'), 2)
# JUMP_IF_FALSE_OR_POP to JUMP_IF_TRUE_OR_POP --> POP_JUMP_IF_FALSE to non-jump
def f(a, b, c):
return ((a and b)
or c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertNotInBytecode(f, 'JUMP_IF_FALSE_OR_POP')
self.assertInBytecode(f, 'JUMP_IF_TRUE_OR_POP')
self.assertInBytecode(f, 'POP_JUMP_IF_FALSE')
# JUMP_IF_TRUE_OR_POP to JUMP_IF_FALSE_OR_POP --> POP_JUMP_IF_TRUE to non-jump
def f(a, b, c):
return ((a or b)
and c)
self.check_jump_targets(f)
self.check_lnotab(f)
self.assertNotInBytecode(f, 'JUMP_IF_TRUE_OR_POP')
self.assertInBytecode(f, 'JUMP_IF_FALSE_OR_POP')
self.assertInBytecode(f, 'POP_JUMP_IF_TRUE')
def test_elim_jump_after_return1(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
if cond1: return 1
if cond2: return 2
while 1:
return 3
while 1:
if cond1: return 4
return 5
return 6
self.assertNotInBytecode(f, 'JUMP_FORWARD')
self.assertNotInBytecode(f, 'JUMP_ABSOLUTE')
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertLessEqual(len(returns), 6)
self.check_lnotab(f)
def test_elim_jump_after_return2(self):
# Eliminate dead code: jumps immediately after returns can't be reached
def f(cond1, cond2):
while 1:
if cond1: return 4
self.assertNotInBytecode(f, 'JUMP_FORWARD')
# There should be one jump for the while loop.
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'JUMP_ABSOLUTE']
self.assertEqual(len(returns), 1)
returns = [instr for instr in dis.get_instructions(f)
if instr.opname == 'RETURN_VALUE']
self.assertLessEqual(len(returns), 2)
self.check_lnotab(f)
def test_make_function_doesnt_bail(self):
def f():
def g()->1+1:
pass
return g
self.assertNotInBytecode(f, 'BINARY_ADD')
self.check_lnotab(f)
def test_constant_folding(self):
# Issue #11244: aggressive constant folding.
exprs = [
'3 * -5',
'-3 * 5',
'2 * (3 * 4)',
'(2 * 3) * 4',
'(-1, 2, 3)',
'(1, -2, 3)',
'(1, 2, -3)',
'(1, 2, -3) * 6',
'lambda x: x in {(3 * -5) + (-1 - 6), (1, -2, 3) * 2, None}',
]
for e in exprs:
code = compile(e, '', 'single')
for instr in dis.get_instructions(code):
self.assertFalse(instr.opname.startswith('UNARY_'))
self.assertFalse(instr.opname.startswith('BINARY_'))
self.assertFalse(instr.opname.startswith('BUILD_'))
self.check_lnotab(code)
def test_in_literal_list(self):
def containtest():
return x in [a, b]
self.assertEqual(count_instr_recursively(containtest, 'BUILD_LIST'), 0)
self.check_lnotab(containtest)
def test_iterate_literal_list(self):
def forloop():
for x in [a, b]:
pass
self.assertEqual(count_instr_recursively(forloop, 'BUILD_LIST'), 0)
self.check_lnotab(forloop)
def test_condition_with_binop_with_bools(self):
def f():
if True or False:
return 1
return 0
self.assertEqual(f(), 1)
self.check_lnotab(f)
def test_if_with_if_expression(self):
# Check bpo-37289
def f(x):
if (True if x else False):
return True
return False
self.assertTrue(f(True))
self.check_lnotab(f)
def test_trailing_nops(self):
# Check the lnotab of a function that even after trivial
# optimization has trailing nops, which the lnotab adjustment has to
# handle properly (bpo-38115).
def f(x):
while 1:
return 3
while 1:
return 5
return 6
self.check_lnotab(f)
def test_assignment_idiom_in_comprehensions(self):
def listcomp():
return [y for x in a for y in [f(x)]]
self.assertEqual(count_instr_recursively(listcomp, 'FOR_ITER'), 1)
def setcomp():
return {y for x in a for y in [f(x)]}
self.assertEqual(count_instr_recursively(setcomp, 'FOR_ITER'), 1)
def dictcomp():
return {y: y for x in a for y in [f(x)]}
self.assertEqual(count_instr_recursively(dictcomp, 'FOR_ITER'), 1)
def genexpr():
return (y for x in a for y in [f(x)])
self.assertEqual(count_instr_recursively(genexpr, 'FOR_ITER'), 1)
class TestBuglets(unittest.TestCase):
def test_bug_11510(self):
# folded constant set optimization was commingled with the tuple
# unpacking optimization which would fail if the set had duplicate
# elements so that the set length was unexpected
def f():
x, y = {1, 1}
return x, y
with self.assertRaises(ValueError):
f()
if __name__ == "__main__":
unittest.main()
| |
# -*- coding: utf-8 -*-
###
# Copyright (c) 2013, spline
# All rights reserved.
#
#
###
# my libs
from base64 import b64decode
import cPickle as pickle
from BeautifulSoup import BeautifulSoup
import sqlite3
import os.path
import datetime # utc time.
from itertools import chain
# extra supybot libs
import supybot.conf as conf
import supybot.schedule as schedule
import supybot.ircmsgs as ircmsgs
# stock supybot libs
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('CFBLive')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x:x
class CFBLive(callbacks.Plugin):
"""Add the help for "@plugin help CFBLive" here
This should describe *how* to use this plugin."""
threaded = True
def __init__(self, irc):
self.__parent = super(CFBLive, self)
self.__parent.__init__(irc)
# our cfblive db.
self._cfbdb = os.path.abspath(os.path.dirname(__file__)) + '/db/cfb.db'
# initial states for channels.
self.channels = {} # dict for channels with values as teams/ids
self._loadpickle() # load saved data.
# initial states for games.
self.games = None
self.nextcheck = None
# dupedict.
self.dupedict = {}
# fetchhost system.
self.fetchhost = None
self.fetchhostcheck = None
# rankings.
self.rankings = {}
self.rankingstimer = None
# fill in the blanks.
if not self.games:
self.games = self._fetchgames()
# setup the function for cron.
def checkcfbcron():
try:
self.checkcfb(irc)
except Exception, e: # something broke. The plugin will stop itself from reporting.
self.log.error("cron: ERROR :: {0}".format(e))
self.nextcheck = self._utcnow()+72000 # add some major delay so the plugin does not spam.
# and add the cronjob.
try: # add our cronjob.
schedule.addPeriodicEvent(checkcfbcron, 30, now=True, name='checkcfb')
except AssertionError:
try:
schedule.removeEvent('checkcfb')
except KeyError:
pass
schedule.addPeriodicEvent(checkcfbcron, 30, now=True, name='checkcfb')
def die(self):
try: # remove cronjob.
schedule.removeEvent('checkcfb')
except KeyError:
pass
self.__parent.die()
######################
# INTERNAL FUNCTIONS #
######################
def _httpget(self, url):
"""General HTTP resource fetcher."""
# self.log.info(url)
try:
h = {"User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:17.0) Gecko/20100101 Firefox/17.0"}
page = utils.web.getUrl(url, headers=h)
return page
except utils.web.Error as e:
self.log.error("ERROR opening {0} message: {1}".format(url, e))
return None
def _utcnow(self):
"""Calculate Unix timestamp from GMT."""
ttuple = datetime.datetime.utcnow().utctimetuple()
_EPOCH_ORD = datetime.date(1970, 1, 1).toordinal()
year, month, day, hour, minute, second = ttuple[:6]
days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
hours = days*24 + hour
minutes = hours*60 + minute
seconds = minutes*60 + second
return seconds
###########################################
# INTERNAL CHANNEL POSTING AND DELEGATION #
###########################################
def _post(self, irc, awayid, homeid, message):
"""Posts message to a specific channel."""
# how this works is we have an incoming away and homeid. we find out their conference ids.
# against the self.channels dict (k=channel, v=set of #). then, if any of the #'s match in the v
# we insert this back into postchans so that the function posts the message into the proper channel(s).
if len(self.channels) == 0: # first, we have to check if anything is in there.
#self.log.error("ERROR: I do not have any channels to output in.")
return
# we do have channels. lets go and check where to put what.
confids = self._tidstoconfids(awayid, homeid) # grab the list of conf ids.
if not confids: # failsafe here.
self.log.error("_post: something went wrong with confids for awayid: {0} homeid: {1} m: {2} confids: {3}".format(awayid, homeid, message, confids))
return
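# __builtins__['any'] is used below likely because 'from supybot.commands import *'
# shadows the builtin any() with supybot's converter of the same name.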
postchans = [k for (k, v) in self.channels.items() if __builtins__['any'](z in v for z in confids)]
# iterate over each.
for postchan in postchans:
try:
irc.queueMsg(ircmsgs.privmsg(postchan, message))
except Exception as e:
self.log.error("ERROR: Could not send {0} to {1}. {2}".format(message, postchan, e))
##############################
# INTERNAL CHANNEL FUNCTIONS #
##############################
def _loadpickle(self):
"""Load channel data from pickle."""
try:
datafile = open(conf.supybot.directories.data.dirize(self.name()+".pickle"), 'rb')
try:
dataset = pickle.load(datafile)
finally:
datafile.close()
except IOError:
return False
# restore.
self.channels = dataset["channels"]
return True
def _savepickle(self):
"""Save channel data to pickle."""
data = {"channels": self.channels}
try:
datafile = open(conf.supybot.directories.data.dirize(self.name()+".pickle"), 'wb')
try:
pickle.dump(data, datafile)
finally:
datafile.close()
except IOError:
return False
return True
##################################
# TEAM DB AND DATABASE FUNCTIONS #
##################################
def _tidwrapper(self, tid, d=False):
"""TeamID wrapper."""
# first, try to see if it's in the database.
dblookup = self._tidtoname(tid, d=d)
if dblookup: # return the DB entry.
return dblookup
else: # not in the db. perform http lookup to grab its name.
url = b64decode('aHR0cDovL20ueWFob28uY29tL3cvc3BvcnRzL25jYWFmL3RlYW0v') + 'ncaaf.t.%s' % str(tid)
html = self._httpget(url)
if not html:
self.log.error("ERROR: _tidwrapper: Could not fetch {0}".format(url))
return "Unknown"
# try and grab teamname.
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES, fromEncoding='utf-8')
teamname = soup.find('div', attrs={'class':'uic title first'})
if teamname: # we found a team name.
teamname = teamname.getText().encode('utf-8')
# see if we can find the conference.
conf = soup.find('div', attrs={'class':'uic last'})
if conf: # we found their conf.
conf = conf.getText()
else: # no conf.
conf = "None"
self.log.info('_tidwrapper: INSERT INTO teams VALUES ("{0}", "{1}", "{2}", "");'.format(conf, tid, teamname))
if d: # return as dict.
return {'team': teamname.encode('utf-8')}
else:
return teamname.encode('utf-8')
else: # didn't find the team. Gotta bail..
self.log.error("ERROR: _tidwrapper: Could not find teamname for tid: {0}".format(tid))
if d:
return {'team': 'Unknown'}
else:
return "Unknown"
def _tidtoname(self, tid, d=False):
"""Return team name for teamid from database. Use d=True to return as dict."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
cursor.execute("SELECT team, tid FROM teams WHERE id=?", (tid,))
row = cursor.fetchone()
# now return the name.
if not row: # didn't find. we just return None here.
return None
else: # did find.
if row[1] != '': # some are empty. we did get something back.
# check if we have rankings and team is in rankings dict.
if ((self.rankings) and (row[1] in self.rankings)): # in there so append the #.
if d: # return as dict.
return {'rank':self.rankings[row[1]], 'team':row[0].encode('utf-8')}
else: # normal return
return "({0}){1}".format(self.rankings[row[1]], row[0].encode('utf-8'))
else: # no rankings or not in the table so just return the teamname.
if d: # return as dict.
return {'team': row[0].encode('utf-8')}
else: # normal return
return row[0].encode('utf-8')
else: # return just the team.
if d: # return as dict.
return {'team': row[0].encode('utf-8')}
else: # normal return
return row[0].encode('utf-8')
def _tidstoconfids(self, tid1, tid2):
"""Fetch the conference ID for a team."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
query = "SELECT DISTINCT conf FROM teams WHERE id IN (?, ?)"
cursor.execute(query, (tid1, tid2,))
item = [i[0] for i in cursor.fetchall()] # put the ids into a list.
# check to make sure we have something.
if len(item) == 0:
return None
else:
return item
def _confs(self):
"""Return a dict containing all conferences and their ids: k=id, v=confs."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
query = "SELECT id, conference FROM confs"
cursor.execute(query)
c = dict((i[0], i[1]) for i in cursor.fetchall())
return c
def _validconf(self, confname):
"""Validate a conf and return its ID."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
query = "SELECT id FROM confs WHERE conference=?"
cursor.execute(query, (confname,))
row = cursor.fetchone()
# now return the name.
if row:
return row[0]
else:
return None
def _tidtoconf(self, tid):
"""Fetch what conference name (string) a team is in."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
query = "SELECT conference FROM confs WHERE id IN (SELECT conf FROM teams WHERE id=?)"
cursor.execute(query, (tid,))
conference = cursor.fetchone()[0]
# now return.
return conference.encode('utf-8')
def _confidtoname(self, confid):
"""Validate a conf and return its ID."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
query = "SELECT conference FROM confs WHERE id=?"
cursor.execute(query, (confid,))
row = cursor.fetchone()
# now return the name.
if row:
return row[0].encode('utf-8')
else:
return None
def _fbsconfs(self):
"""Return a list of all FBS conference ids."""
with sqlite3.connect(self._cfbdb) as conn:
cursor = conn.cursor()
query = "SELECT id FROM confs WHERE division=1"
cursor.execute(query)
confids = [i[0] for i in cursor.fetchall()]
# now return.
return confids
####################
# FETCH OPERATIONS #
####################
def _fetchhost(self):
"""Return the host for fetch operations."""
utcnow = self._utcnow()
# if we don't have the host, lastchecktime, or fetchhostcheck has passed, we regrab.
if ((not self.fetchhostcheck) or (not self.fetchhost) or (self.fetchhostcheck < utcnow)):
url = b64decode('aHR0cDovL2F1ZC5zcG9ydHMueWFob28uY29tL2Jpbi9ob3N0bmFtZQ==')
html = self._httpget(url) # try and grab.
if not html:
self.log.error("ERROR: _fetchhost: could not fetch {0}")
return None
# now that we have html, make sure its valid.
if html.startswith("aud"):
fhurl = 'http://%s' % (html.strip())
self.fetchhost = fhurl # set the url.
self.fetchhostcheck = utcnow+3600 # 1hr from now.
return fhurl
else:
self.log.error("ERROR: _fetchhost: returned string didn't match aud. We got {0}".format(html))
return None
else: # we have a host and it's under the cache time.
return self.fetchhost
def _fetchgames(self, filt=True):
"""Return the games.txt data into a processed dict. Set filter=False for all games."""
url = self._fetchhost() # grab the host to check.
if not url: # didn't get it back.
self.log.error("ERROR: _fetchgames broke on _fetchhost()")
return None
else: # we got fetchhost. create the url.
url = "%s/ncaaf/games.txt" % (url)
# now we try and fetch the actual url with data.
html = self._httpget(url)
if not html:
self.log.error("ERROR: _fetchgames: could not fetch {0} :: {1}".format(url))
return None
# now turn the "html" into a list of dicts.
newgames = self._txttodict(html, filt=filt)
if not newgames: # no new games for some reason.
return None
else: # we have games. return.
return newgames
def _filtergame(self, at, ht):
"""With at and ht ids, we need to test if we should filter."""
# check to see what activeconfs comes from.
if len(self.channels) != 0: # we have "active" confs. consolidate the sets from each active channel.
activeconfs = set(chain.from_iterable(([v for (k, v) in self.channels.items()])))
else: # no active confs so we just grab FBS conf ids.
activeconfs = set(self._fbsconfs())
# now lets take the at+ht ids and test.
teamidslist = self._tidstoconfids(at, ht) # grab the list of conf ids for this game.
if teamidslist: # failsafe but should never trigger.
if not activeconfs.isdisjoint(teamidslist): # this will be True if one of the ids from teamidslist = in activeconfs.
return True
else: # at/ht (game) is NOT in activeconfs.
return False
else: # missing teams.. sigh.
self.log.info("_filtergame: teamidslist failed on one of AT: {0} HT: {1}".format(at, ht))
return False
def _txttodict(self, txt, filt):
"""Games game lines from fetchgames and turns them into a list of dicts. filt=True to limit games."""
lines = txt.splitlines() # split.
games = {} # container.
for line in lines: # iterate over.
if line.startswith('g|'): # only games.
cclsplit = line.split('|') # split.
# handle each field here. cryptic but we figured it out w/Intrepd's help.
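# Pipe-delimited game line fields as consumed below:
# [1]=game key/id, [2]=away team id, [3]=home team id, [4]=status,
# [6]=quarter, [7]=clock, [8]=away score, [9]=home score, [10]=start (int).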
t = {} # tmp dict for each line.
t['awayteam'] = cclsplit[2]
t['hometeam'] = cclsplit[3]
t['status'] = cclsplit[4]
t['quarter'] = cclsplit[6]
t['time'] = cclsplit[7]
t['awayscore'] = int(cclsplit[8])
t['homescore'] = int(cclsplit[9])
t['start'] = int(cclsplit[10])
# now we need to test if we should filter.
if filt: # True. filtertest will be True if we should include the game. False if we should skip/pass over.
filtertest = self._filtergame(t['awayteam'], t['hometeam'])
if filtertest: # add into games dict.
games[cclsplit[1]] = t
else: # False. Don't filter. Add everything.
games[cclsplit[1]] = t
# process if we have games or not.
if len(games) == 0: # no games.
self.log.error("ERROR: No matching lines in _txttodict")
self.log.error("ERROR: _txttodict: {0}".format(txt))
return None
else:
return games
def _scoreevent(self, gid):
"""Fetch last scoring event from game."""
url = self._fetchhost() # grab the host to check.
if not url: # didn't get it back.
self.log.error("ERROR: _scoreevent broke on _fetchhost()")
return None
else: # we got fetchhost. create the url.
url = '%s/ncaaf/plays-%s.txt' % (url, str(gid))
# now fetch the url.
html = self._httpget(url)
if not html:
self.log.error("ERROR: Could not fetch {0} :: {1}".format(url))
return None
# process.
lines = html.splitlines()
scorelines = [] # put matching lines into list.
for line in lines: # iterate over each.
if line.startswith('s'): # only scoring.
linesplit = line.split('|') # split line.
scorelines.append(linesplit) # append.
# make sure we have scorelines.
if len(scorelines) == 0: # bail if 0.
return None
# now return process scorelines.
lastline = scorelines[-1] # grab the last item in scorelines list.
ev = {'id':lastline[2], 'event':lastline[10].encode('utf-8')} # id is 2, event itself is 10.
return ev
def _boldleader(self, awayteam, awayscore, hometeam, homescore):
"""Conveinence function to bold the leader."""
if (int(awayscore) > int(homescore)): # visitor winning.
return "{0} {1} {2} {3}".format(ircutils.bold(awayteam), ircutils.bold(awayscore), hometeam, homescore)
elif (int(awayscore) < int(homescore)): # home winning.
return "{0} {1} {2} {3}".format(awayteam, awayscore, ircutils.bold(hometeam), ircutils.bold(homescore))
else: # tie.
return "{0} {1} {2} {3}".format(awayteam, awayscore, hometeam, homescore)
def _scoretype(self, pd):
"""Return score event type based on the difference in points."""
if pd in (1, 2): # 1 and 2 pt safety.
return "SAF"
elif pd == 3: # fg.
return "FG"
elif pd in (6, 7, 8): # td.
return "TD"
else: # rutroh.
self.log.info("_scoretype: something sent me {0}".format(pd))
return "UNK"
def _rankings(self):
"""Fetch the AP/BCS rankings for display."""
# first, we need the time.
utcnow = self._utcnow()
# now determine if we should repopulate.
if ((len(self.rankings) == 0) or (not self.rankingstimer) or (utcnow > self.rankingstimer)):
# we'll put a try/except for the BCS, as well.
url = b64decode('aHR0cDovL3Nwb3J0cy55YWhvby5jb20vbmNhYS9mb290YmFsbC9wb2xscz9wb2xsPTE=')
# fetch url
html = self._httpget(url)
if not html:
self.log.error("ERROR: Could not fetch {0}".format(url))
self.rankingstimer = utcnow+60 # rerun in one minute.
self.log.info("_rankings: AP html failed")
return # bail here so we do not try to parse an empty response below.
try: # parse the table and populate.
soup = BeautifulSoup(html)
table = soup.find('table', attrs={'id':'ysprankings-results-table'})
rows = table.findAll('tr')[1:26] # just to make sure.
for i, row in enumerate(rows):
team = row.find('a')['href'].split('/')[5] # find the team abbr.
self.rankings[team] = i+1 # populate dict.
# now finalize.
self.rankingstimer = utcnow+86400 # 24hr.
self.log.info("_rankings: updated AP rankings.")
except Exception, e: # something went wrong.
self.log.error("_rankings: AP ERROR: {0}".format(e))
self.rankingstimer = utcnow+60 # rerun in one minute.
def _gctosec(self, s):
"""Convert seconds of clock into an integer of seconds remaining."""
#self.log.info("S IS: {0} AND TYPE: {1}".format(s, type(s)))
if isinstance(s, str):
if ':' in s:
l = s.split(':')
return int(int(l[0]) * 60 + int(l[1]))
else:
return int(round(float(s)))
else:
return s
######################
# CHANNEL MANAGEMENT #
######################
def cfbliveon(self, irc, msg, args, channel):
"""
Re-enable CFBLive updates in a channel.
Must be issued by an op in a channel that already has updates configured.
"""
# channel
channel = channel.lower()
# check if op.
if not irc.state.channels[channel].isOp(msg.nick):
irc.reply("ERROR: You must be an op in this channel for this command to work.")
return
# check if channel is already on.
if channel in self.channels:
irc.reply("ERROR: {0} is already enabled for CFBLive updates.".format(channel))
# we're here if it's not. let's re-add whatever we have saved.
# most of this is from _loadchannels
try:
datafile = open(conf.supybot.directories.data.dirize(self.name()+".pickle"), 'rb')
try:
dataset = pickle.load(datafile)
finally:
datafile.close()
except IOError:
irc.reply("ERROR: I could not open the CFBLive pickle to restore. Something went horribly wrong.")
return
# now check if channels is in the dataset from the pickle.
if channel in dataset['channels']: # it is. we're good.
self.channels[channel] = dataset['channels'][channel] # restore it.
else:
irc.reply("ERROR: {0} is not in the saved channel list. Please use cfbchannel to add it.".format(channel))
cfbliveon = wrap(cfbliveon, [('channel')])
def cfbliveoff(self, irc, msg, args, channel):
"""
Disable CFBLive scoring updates in a channel.
Must be issued by an op in a channel it is enabled for.
"""
# channel
channel = channel.lower()
# check if op.
if not irc.state.channels[channel].isOp(msg.nick):
irc.reply("ERROR: You must be an op in this channel for this command to work.")
return
# check if channel is already on.
if channel not in self.channels:
irc.reply("ERROR: {0} is not in self.channels. I can't disable updates for a channel I don't have configured.".format(channel))
return
else: # channel is in the dict so lets do a temp disable by deleting it.
del self.channels[channel]
irc.reply("I have successfully disabled cfblive updates in {0}".format(channel))
cfbliveoff = wrap(cfbliveoff, [('channel')])
def cfbchannel(self, irc, msg, args, op, optchannel, optarg):
"""<add|list|del|confs> <#channel> <CONFERENCE>
Add or delete conference(s) from a specific channel's output.
Use conference name or ALL for everything. Can only specify one at a time.
Ex: add #channel1 ALL OR add #channel2 SEC OR del #channel1 ALL OR list
"""
# first, lower operation.
op = op.lower()
# next, make sure op is valid.
validop = ['add', 'list', 'del', 'confs']
if op not in validop: # test for a valid operation.
irc.reply("ERROR: '{0}' is an invalid operation. It must be be one of: {1}".format(op, " | ".join([i for i in validop])))
return
# if we're not doing list (add or del) make sure we have the arguments.
if ((op != 'list') and (op != 'confs')):
if not optchannel or not optarg: # add|del need these.
irc.reply("ERROR: add and del operations require a channel and team. Ex: add #channel SEC OR del #channel SEC")
return
# we are doing an add/del op.
optchannel = optchannel.lower()
# make sure channel is something we're in
if op == 'add': # check for channel on add only.
if optchannel not in irc.state.channels:
irc.reply("ERROR: '{0}' is not a valid channel. You must add a channel that we are in.".format(optchannel))
return
# test for valid team now.
confid = self._validconf(optarg)
if not confid: # invalid arg(conf)
irc.reply("ERROR: '{0}' is an invalid conference. Must be one of: {1}".format(optarg, " | ".join(sorted(self._confs().values()))))
return
# main meat part.
# now we handle each op individually.
if op == 'add': # add output to channel.
self.channels.setdefault(optchannel, set()).add(confid) # add it.
self._savepickle() # save.
irc.reply("I have added {0} into {1}".format(optarg, optchannel))
elif op == 'confs': # list confs.
irc.reply("Valid Confs for cfbchannel: {0}".format(" | ".join(sorted(self._confs().values()))))
elif op == 'list': # list channels.
if len(self.channels) == 0: # no channels.
irc.reply("ERROR: I have no active channels defined. Please use the cfbchannel add operation to add a channel.")
else: # we do have channels.
for (k, v) in self.channels.items(): # iterate through and output
irc.reply("{0} :: {1}".format(k, " | ".join([self._confidtoname(q) for q in v])))
elif op == 'del': # delete an item from channels.
if optchannel in self.channels:
if confid in self.channels[optchannel]: # id is already in.
self.channels[optchannel].remove(confid) # remove it.
if len(self.channels[optchannel]) == 0: # none left.
del self.channels[optchannel] # delete the channel key.
self._savepickle() # save it.
irc.reply("I have successfully removed {0} from {1}".format(optarg, optchannel))
else:
irc.reply("ERROR: I do not have {0} in {1}".format(optarg, optchannel))
else:
irc.reply("ERROR: I do not have {0} in {1}".format(optarg, optchannel))
cfbchannel = wrap(cfbchannel, [('checkCapability', 'admin'), ('somethingWithoutSpaces'), optional('channel'), optional('text')])
###################
# PUBLIC COMMANDS #
###################
#def cfbgames(self, irc, msg, args):
# """
# Display all current games in the self.games
# """
#
# games = self._fetchgames(filt=False)
# if not games:
# irc.reply("ERROR: Fetching games.")
# return
# for (k, v) in games.items():
# at = self._tidwrapper(v['awayteam'])
# ht = self._tidwrapper(v['hometeam'])
# irc.reply("{0} v. {1} :: {2}".format(at, ht, v))
#
#cfbgames = wrap(cfbgames)
def checkcfb(self, irc):
#def checkcfb(self, irc, msg, args):
"""
Main loop.
"""
# debug.
self.log.info("checkcfb: starting...")
# before anything, check if nextcheck is set and is in the future.
if self.nextcheck: # set
utcnow = self._utcnow()
if self.nextcheck > utcnow: # in the future so we backoff.
self.log.info("checkcfb: nextcheck is in {0}s".format(self.nextcheck-utcnow))
return
else: # in the past so lets reset it. this means that we've reached the time where firstgametime should begin.
self.log.info("checkcfb: nextcheck has passed. we are resetting and continuing normal operations.")
self.nextcheck = None
# we must have initial games. bail if not.
if not self.games:
self.games = self._fetchgames()
return
# check and see if we have initial games, again, but bail if no.
if not self.games:
self.log.error("checkcfb: I did not have any games in self.games")
return
else: # setup the initial games.
games1 = self.games
# now we must grab the new status to compare to.
games2 = self._fetchgames()
if not games2: # something went wrong so we bail.
self.log.error("checkcfb: fetching games2 failed.")
return
# before we run the main event handler, make sure we have rankings.
self._rankings()
# main handler for event changes.
# we go through and have to match specific conditions based on changes.
for (k, v) in games1.items(): # iterate over games.
if k in games2: # must match keys between games1 and games2.
# ACTIVE GAME EVENTS HERE
if ((v['status'] == "P") and (games2[k]['status'] == "P")):
# make sure the event is dupedict so we can print events.
if k not in self.dupedict:
self.dupedict[k] = set([]) # add.
# SCORING PLAY.
if ((games2[k]['awayscore'] > v['awayscore']) or (games2[k]['homescore'] > v['homescore'])):
self.log.info("Should post scoring event from {0}".format(k))
# first, get some basics with teamnames.
at = self._tidwrapper(v['awayteam'], d=True) # fetch visitor.
ht = self._tidwrapper(v['hometeam'], d=True) # fetch home.
# get the score diff so we can figure out the score type and who scored.
apdiff = abs((int(v['awayscore'])-int(games2[k]['awayscore']))) # awaypoint diff.
hpdiff = abs((int(v['homescore'])-int(games2[k]['homescore']))) # homepoint diff.
if apdiff != 0: # awayscore is not 0, ie: awayteam scored.
sediff = apdiff # int
seteam = at['team'] # get awayteam.
else: # hometeam scored.
sediff = hpdiff # int
seteam = ht['team'] # get hometeam.
# figure out the scoretype.
setype = self._scoretype(sediff) # figure out score type.
# we need to reconstruct at/ht as a string. since we called tidwrapper with d=True, we have to reattach the ranking, if present.
if 'rank' in at: # we have rank so (#)Team.
at = "({0}){1}".format(at['rank'], at['team'])
else: # no rank.
at = "{0}".format(at['team'])
if 'rank' in ht: # do the same for the hometeam.
ht = "({0}){1}".format(ht['rank'], ht['team'])
else: # no rank.
ht = "{0}".format(ht['team'])
# now construct the rest of the string.
gamestr = self._boldleader(at, games2[k]['awayscore'], ht, games2[k]['homescore']) # bold the leader.
scoretime = "{0} {1}".format(utils.str.ordinal(games2[k]['quarter']), games2[k]['time']) # score time.
se = self._scoreevent(v['hometeam']) # use the hometeam id for plays-### (scoreevent page).
if se: # we got scoringevent back.
# make sure this event has not been posted yet.
if se['id'] not in self.dupedict[k]: # we have NOT posted it yet. lets format for output.
mstr = "{0} :: {1} :: {2} :: {3} ({4})".format(gamestr, ircutils.bold(setype), seteam, se['event'], scoretime) # lets construct the string.
self._post(irc, v['awayteam'], v['hometeam'], mstr) # post to irc.
self.dupedict[k].add(se['id']) # add to dupedict.
else: # scoring event did not work. just post a generic string. this could be buggy.
mstr = "{0} :: {1} :: {2} ({3})".format(gamestr, ircutils.bold(setype), seteam, scoretime)
self._post(irc, v['awayteam'], v['hometeam'], mstr) # post to irc.
# UPSET ALERT. CHECKS ONLY IN 3RD/4TH QUARTER AT 2 MINUTE MARK.
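# The condition detects the tick where the clock crosses the two-minute mark in the 3rd or
# 4th quarter (previous reading >= 120s, new reading < 120s), so the alert fires once per crossing.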
if ((games2[k]['quarter'] in ("3", "4")) and (v['time'] != games2[k]['time']) and (self._gctosec(v['time']) >= 120) and (self._gctosec(games2[k]['time']) < 120)):
#self.log.info("inside upset alert {0}".format(k))
# fetch teams with ranking in dict so we can determine if there is a potential upset on hand.
at = self._tidwrapper(v['awayteam'], d=True) # fetch visitor.
ht = self._tidwrapper(v['hometeam'], d=True) # fetch home.
# now we need to check if there is a ranking in either or both teams and
# act properly depending on the rank + score.
if (('rank' in at) or ('rank' in ht)): # require ranking. 3 scenarios: at ranked, ht ranked, both ranked.
#self.log.info("2nd upset alert in {0}".format(k))
awayscore = games2[k]['awayscore'] # grab the score.
homescore = games2[k]['homescore']
scorediff = abs(awayscore-homescore) # abs on the points diff.
upsetalert, potentialupsetalert, upsetstr = False, False, None # defaults.
if (('rank' in at) and ('rank' not in ht)): # away team ranked, home team is not.
#self.log.info("rank in at not ht {0}".format(k))
if homescore > awayscore: # ranked awayteam is losing.
upsetalert = True
else:
if scorediff < 9: # score is within a single possession.
potentialupsetalert = True
elif (('rank' not in at) and ('rank' in ht)): # home team ranked, away is not.
#self.log.info("rank in ht not at {0}".format(k))
if awayscore > homescore: # ranked hometeam is losing.
upsetalert = True
else:
if scorediff < 9: # score is within a single possession.
potentialupsetalert = True
else: # both teams are ranked, so we have to check what team is ranked higher and act accordingly.
#self.log.info("both teams ranked {0}".format(k))
if at['rank'] < ht['rank']: # away team ranked higher. (lower is higher)
if homescore > awayscore: # home team is winning.
upsetalert = True
else:
if scorediff < 9: # score is within a single possession.
potentialupsetalert = True
else: # home team is ranked higher. (lower is higher)
if awayscore > homescore: # away team is winning.
upsetalert = True
else:
if scorediff < 9: # score is within a single possession.
potentialupsetalert = True
# now that we're done, we check on upsetalert and potentialupsetalert to set upsetstr.
if upsetalert: # we have an upset alert.
upsetstr = ircutils.bold("PEPTO-BISMOL UPSET ALERT")
elif potentialupsetalert: # we have a potential upset.
upsetstr = ircutils.bold("POTENTIAL PEPTO-BISMOL UPSET ALERT")
# should we fire?
if upsetstr: # this was set above if conditions were met. so lets get our std gamestr, w/score, add the string, and post.
self.log.info("SHOULD BE POSTING ACTUAL UPSET ALERT STRING FROM {0}".format(k))
gamestr = self._boldleader(self._tidwrapper(v['awayteam']), games2[k]['awayscore'], self._tidwrapper(v['hometeam']), games2[k]['homescore'])
mstr = "{0} :: {1}".format(gamestr, upsetstr)
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# END OF 1ST AND 3RD QUARTER.
if ((v['time'] != games2[k]['time']) and (games2[k]['quarter'] in ("1", "3")) and (games2[k]['time'] == "0:00")):
self.log.info("Should end of quarter in {0}".format(k))
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
gamestr = self._boldleader(at, games2[k]['awayscore'], ht, games2[k]['homescore'])
qtrstr = "End of {0} qtr".format(utils.str.ordinal(games2[k]['quarter']))
mstr = "{0} :: {1}".format(gamestr, ircutils.mircColor(qtrstr, 'red'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# HALFTIME IN
if ((v['time'] != games2[k]['time']) and (games2[k]['quarter'] == "2") and (games2[k]['time'] == "0:00")):
self.log.info("Should fire halftime in {0}".format(k))
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
gamestr = self._boldleader(at, games2[k]['awayscore'], ht, games2[k]['homescore'])
mstr = "{0} :: {1}".format(gamestr, ircutils.mircColor("HALFTIME", 'yellow'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# HALFTIME OUT
if ((v['quarter'] != games2[k]['quarter']) and (v['time'] != games2[k]['time']) and (games2[k]['quarter'] == "3") and (games2[k]['time'] == "15:00")):
self.log.info("Should fire 3rd quarter in {0}".format(k))
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
gamestr = self._boldleader(at, games2[k]['awayscore'], ht, games2[k]['homescore'])
mstr = "{0} :: {1}".format(gamestr, ircutils.mircColor("Start 3rd Qtr", 'green'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# OT NOTIFICATION
if ((v['quarter'] != games2[k]['quarter']) and (int(games2[k]['quarter']) > 4)):
self.log.info("Should fire OT notification in {0}".format(k))
otper = "Start OT{0}".format(int(games2[k]['quarter'])-4) # should start with 5, which is OT1.
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
gamestr = self._boldleader(at, games2[k]['awayscore'], ht, games2[k]['homescore'])
mstr = "{0} :: {1}".format(gamestr, ircutils.mircColor(otper, 'green'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# EVENTS OUTSIDE OF AN ACTIVE GAME.
else:
# KICKOFF.
if ((v['status'] == "S") and (games2[k]['status'] == "P")):
self.log.info("{0} is kicking off.".format(k))
# add game into dupedict.
if k not in self.dupedict:
self.dupedict[k] = set([])
# now construct kickoff event.
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
atconf = self._tidtoconf(v['awayteam']) # fetch visitor conf.
htconf = self._tidtoconf(v['hometeam']) # fetch hometeam conf.
mstr = "{0}({1}) @ {2}({3}) :: {4}".format(ircutils.bold(at), atconf, ircutils.bold(ht), htconf, ircutils.mircColor("KICKOFF", 'green'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# GAME GOES FINAL.
if ((v['status'] == "P") and (games2[k]['status'] == "F")):
self.log.info("{0} is going final.".format(k))
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
gamestr = self._boldleader(at, games2[k]['awayscore'], ht, games2[k]['homescore'])
if (int(games2[k]['quarter']) > 4):
fot = "F/OT{0}".format(int(games2[k]['quarter'])-4)
mstr = "{0} :: {1}".format(gamestr, ircutils.mircColor(fot, 'red'))
else:
mstr = "{0} :: {1}".format(gamestr, ircutils.mircColor("F", 'red'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# lets now try to remove from dupedict.
if k in self.dupedict:
del self.dupedict[k] # delete.
# GAME GOES INTO A DELAY.
if ((v['status'] == "P") and (games2[k]['status'] == "D")):
self.log.info("{0} is going into delay.".format(k))
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
mstr = "{0}@{1} :: {2}".format(at, ht, ircutils.mircColor("DELAY", 'yellow'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# GAME COMES OUT OF A DELAY.
if ((v['status'] == "D") and (games2[k]['status'] == "P")):
self.log.info("{0} is resuming from delay.".format(k))
at = self._tidwrapper(v['awayteam']) # fetch visitor.
ht = self._tidwrapper(v['hometeam']) # fetch home.
mstr = "{0}@{1} :: {2}".format(at, ht, ircutils.mircColor("RESUMED", 'green'))
self._post(irc, v['awayteam'], v['hometeam'], mstr)
# done checking. copy new to self.games
self.games = games2 # change status.
# last, before we reset to check again, we need to verify some states of games in order to set sentinel or not.
# STATUSES :: D = Delay, P = Playing, S = Future Game, F = Final, O = PPD
# first, we grab all the statuses in newgames (games2)
gamestatuses = set([i['status'] for i in games2.values()])
self.log.info("GAMESTATUSES: {0}".format(gamestatuses))
# next, check what the statuses of those games are and act accordingly.
if (('D' in gamestatuses) or ('P' in gamestatuses)): # if any games are being played or in a delay, act normal.
self.nextcheck = None # set to None to make sure we're checking on normal time.
elif 'S' in gamestatuses: # no games being played or in delay, but we have games in the future. (ie: day games done but night games later)
firstgametime = sorted([f['start'] for (i, f) in games2.items() if f['status'] == "S"])[0] # get all start times with S, first (earliest).
utcnow = self._utcnow() # grab UTC now.
if firstgametime > utcnow: # make sure it is in the future so lock is not stale.
self.nextcheck = firstgametime # set to the "first" game with 'S'.
self.log.info("checkcfb: we have games in the future (S) so we're setting the next check {0} seconds from now".format(firstgametime-utcnow))
else: # firstgametime is NOT in the future. this is a problem.
fgtdiff = abs(firstgametime-utcnow) # get how long ago the first game should have been.
if fgtdiff < 3601: # if less than an hour ago, just basically pass.
self.nextcheck = None
self.log.info("checkcfb: firstgametime has passed but is under an hour so we resume normal operations.")
                else: # over an hour late, so back off and check again in 10 minutes.
self.nextcheck = utcnow+600
self.log.info("checkcfb: firstgametime is over an hour late so we're going to backoff for 10 minutes")
else: # everything is "F" (Final). we want to backoff so we're not flooding.
self.nextcheck = self._utcnow()+600 # 10 minutes from now.
self.log.info("checkcfb: no active games and I have not got new games yet, so I am holding off for 10 minutes.")
#checkcfb = wrap(checkcfb)
Class = CFBLive
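# Illustrative sketch (not used by the plugin): the upset-alert branch in
# checkcfb above is a small decision table: fire a full alert when the ranked
# (or higher-ranked) team is trailing, and a "potential" alert when the game
# is within a single possession (mirroring the < 9 check above). The helper
# below restates that logic as a pure function purely for clarity; the name
# _upset_state and its signature are assumptions, the rest mirrors the code.
def _upset_state(away_rank, home_rank, awayscore, homescore):
    """Return 'upset', 'potential' or None. Ranks are ints or None; a lower rank is better."""
    if away_rank is None and home_rank is None:
        return None  # no ranked team involved, nothing to alert on.
    scorediff = abs(awayscore - homescore)
    # Work out which side is favored: the ranked team, or the higher-ranked one.
    if home_rank is None or (away_rank is not None and away_rank < home_rank):
        favored_losing = homescore > awayscore  # away side is favored.
    else:
        favored_losing = awayscore > homescore  # home side is favored.
    if favored_losing:
        return 'upset'
    if scorediff < 9:  # favored team leads, but only by a single possession.
        return 'potential'
    return None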
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| |
import sys
from django.conf import settings
from django.core.signals import got_request_exception
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.template import Template
from django.test import TestCase
class TestException(Exception):
pass
# A middleware base class that tracks which methods have been called
class TestMiddleware(object):
def __init__(self):
self.process_request_called = False
self.process_view_called = False
self.process_response_called = False
self.process_template_response_called = False
self.process_exception_called = False
def process_request(self, request):
self.process_request_called = True
def process_view(self, request, view_func, view_args, view_kwargs):
self.process_view_called = True
def process_template_response(self, request, response):
self.process_template_response_called = True
return response
def process_response(self, request, response):
self.process_response_called = True
return response
def process_exception(self, request, exception):
self.process_exception_called = True
# Middleware examples that do the right thing
class RequestMiddleware(TestMiddleware):
def process_request(self, request):
super(RequestMiddleware, self).process_request(request)
return HttpResponse('Request Middleware')
class ViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(ViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
return HttpResponse('View Middleware')
class ResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(ResponseMiddleware, self).process_response(request, response)
return HttpResponse('Response Middleware')
class TemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(TemplateResponseMiddleware, self).process_template_response(request, response)
return TemplateResponse(request, Template('Template Response Middleware'))
class ExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(ExceptionMiddleware, self).process_exception(request, exception)
return HttpResponse('Exception Middleware')
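# Each middleware class above returns its own HttpResponse (or a
# TemplateResponse) from the hook it overrides, either short-circuiting the
# request or replacing the response; the Bad* classes below raise
# TestException from the same hooks instead.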
# Sample middlewares that raise exceptions
class BadRequestMiddleware(TestMiddleware):
def process_request(self, request):
super(BadRequestMiddleware, self).process_request(request)
raise TestException('Test Request Exception')
class BadViewMiddleware(TestMiddleware):
def process_view(self, request, view_func, view_args, view_kwargs):
super(BadViewMiddleware, self).process_view(request, view_func, view_args, view_kwargs)
raise TestException('Test View Exception')
class BadTemplateResponseMiddleware(TestMiddleware):
def process_template_response(self, request, response):
super(BadTemplateResponseMiddleware, self).process_template_response(request, response)
raise TestException('Test Template Response Exception')
class BadResponseMiddleware(TestMiddleware):
def process_response(self, request, response):
super(BadResponseMiddleware, self).process_response(request, response)
raise TestException('Test Response Exception')
class BadExceptionMiddleware(TestMiddleware):
def process_exception(self, request, exception):
super(BadExceptionMiddleware, self).process_exception(request, exception)
raise TestException('Test Exception Exception')
class BaseMiddlewareExceptionTest(TestCase):
urls = 'middleware_exceptions.urls'
def setUp(self):
self.exceptions = []
got_request_exception.connect(self._on_request_exception)
self.client.handler.load_middleware()
def tearDown(self):
got_request_exception.disconnect(self._on_request_exception)
self.exceptions = []
def _on_request_exception(self, sender, request, **kwargs):
self.exceptions.append(sys.exc_info())
def _add_middleware(self, middleware):
self.client.handler._request_middleware.insert(0, middleware.process_request)
self.client.handler._view_middleware.insert(0, middleware.process_view)
self.client.handler._template_response_middleware.append(middleware.process_template_response)
self.client.handler._response_middleware.append(middleware.process_response)
self.client.handler._exception_middleware.append(middleware.process_exception)
def assert_exceptions_handled(self, url, errors, extra_error=None):
try:
response = self.client.get(url)
except TestException:
            # The test client intentionally re-raises any exception raised
            # during request handling, so whether the exception was handled
            # properly is verified via the got_request_exception signal
            # instead.
pass
except Exception as e:
if type(extra_error) != type(e):
self.fail("Unexpected exception: %s" % e)
self.assertEqual(len(self.exceptions), len(errors))
for i, error in enumerate(errors):
exception, value, tb = self.exceptions[i]
self.assertEqual(value.args, (error, ))
def assert_middleware_usage(self, middleware, request, view, template_response, response, exception):
self.assertEqual(middleware.process_request_called, request)
self.assertEqual(middleware.process_view_called, view)
self.assertEqual(middleware.process_template_response_called, template_response)
self.assertEqual(middleware.process_response_called, response)
self.assertEqual(middleware.process_exception_called, exception)
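# Note on the assertions below: assert_middleware_usage takes five booleans in
# the order (request, view, template_response, response, exception), i.e. which
# of the middleware hooks are expected to have run for that request.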
class MiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_middleware(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_template_response_middleware(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, True, True, False)
self.assert_middleware_usage(middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_exception_middleware(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = TemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_response_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_not_found(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view'], Exception())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_exception(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return an HttpResponse object.",
],
ValueError())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_middleware_null_view(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return an HttpResponse object."
],
ValueError())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = RequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, True)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
middleware = ExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_template_response_error(self):
middleware = TestMiddleware()
self._add_middleware(middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response_error/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(middleware, True, True, True, True, False)
class BadMiddlewareTests(BaseMiddlewareExceptionTest):
def test_process_request_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_template_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadTemplateResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/template_response/', ['Test Template Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, True, True, False)
self.assert_middleware_usage(post_middleware, True, True, True, True, False)
def test_process_response_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/view/', [])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_not_found(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/not_found/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Error in view', 'Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_exception(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/error/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_request_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return an HttpResponse object.",
'Test Response Exception'
])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_exception_bad_middleware_null_view(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/null_view/', [
"The view middleware_exceptions.views.null_view didn't return an HttpResponse object."
],
ValueError())
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, True, False, True, False)
def test_process_request_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadRequestMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Request Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, False, False, True, False)
self.assert_middleware_usage(bad_middleware, True, False, False, True, False)
self.assert_middleware_usage(post_middleware, False, False, False, True, False)
def test_process_view_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadViewMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test View Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, False)
self.assert_middleware_usage(post_middleware, True, False, False, True, False)
def test_process_response_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadResponseMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Response Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, False, True)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
def test_process_exception_bad_middleware_permission_denied(self):
pre_middleware = TestMiddleware()
bad_middleware = BadExceptionMiddleware()
post_middleware = TestMiddleware()
self._add_middleware(post_middleware)
self._add_middleware(bad_middleware)
self._add_middleware(pre_middleware)
self.assert_exceptions_handled('/middleware_exceptions/permission_denied/', ['Test Exception Exception'])
# Check that the right middleware methods have been invoked
self.assert_middleware_usage(pre_middleware, True, True, False, True, False)
self.assert_middleware_usage(bad_middleware, True, True, False, True, True)
self.assert_middleware_usage(post_middleware, True, True, False, True, True)
_missing = object()
class RootUrlconfTests(TestCase):
urls = 'middleware_exceptions.urls'
def test_missing_root_urlconf(self):
try:
original_ROOT_URLCONF = settings.ROOT_URLCONF
del settings.ROOT_URLCONF
except AttributeError:
original_ROOT_URLCONF = _missing
self.assertRaises(AttributeError,
self.client.get, "/middleware_exceptions/view/"
)
if original_ROOT_URLCONF is not _missing:
settings.ROOT_URLCONF = original_ROOT_URLCONF
| |
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import itertools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from .multiarray import array, asanyarray, normalize_axis_index
from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_2d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_3d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
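# Note: returning an empty tuple for non-sequence iterables means the
# __array_function__ machinery sees no arrays to dispatch on for generator
# input; the FutureWarning above is what callers see, and dispatch then falls
# through to the default implementation.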
def _vhstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
        The array formed by stacking the given arrays; it will be at least 2-D.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
block : Assemble an nd-array from nested lists of blocks.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.vstack((a,b))
array([[1, 2, 3],
[4, 5, 6]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[4], [5], [6]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[4],
[5],
[6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis, except for 1-D
arrays where it concatenates along the first axis. Rebuilds arrays divided
by `hsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis,
except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
block : Assemble an nd-array from nested lists of blocks.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
hsplit : Split an array into multiple sub-arrays horizontally (column-wise).
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((4,5,6))
>>> np.hstack((a,b))
array([1, 2, 3, 4, 5, 6])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[4],[5],[6]])
>>> np.hstack((a,b))
array([[1, 4],
[2, 5],
[3, 6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
block : Assemble an nd-array from nested lists of blocks.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.stack((a, b))
array([[1, 2, 3],
[4, 5, 6]])
>>> np.stack((a, b), axis=-1)
array([[1, 4],
[2, 5],
[3, 6]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(arrays, stacklevel=2)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
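# Illustrative note: for ``axis=1`` and 2-D inputs, ``sl`` above is
# ``(slice(None), None)``, so each (M, N) array is viewed as (M, 1, N) before
# the final concatenate along the new axis.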
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
final_size: int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
_block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at depth "
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
_block_format_index(index)
)
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
# Ensures `a` has at least `ndim` dimensions by prepending
# ones to `a.shape` as necessary
return array(a, ndmin=ndim, copy=False, subok=True)
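# Illustrative note: ``ndmin`` pads new axes on the left, so e.g. an input of
# shape (3,) with ndim=3 comes back with shape (1, 1, 3).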
def _accumulate(values):
return list(itertools.accumulate(values))
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatenation.
Returns
-------
shape: tuple of int
This tuple satisfies::
            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
shape == concatenate(arrs, axis).shape
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds::
ret = concatenate([a, b, c], axis)
            _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
        to an existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
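# Illustrative example: for shapes [(2, 3), (4, 3)] and axis=0, shape_at_axis
# is [2, 4], the returned shape is (6, 3), and the slice prefixes are
# [(slice(0, 2),), (slice(2, 6),)].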
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
"""
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
    max_depth : int
        The depth of nesting of the input lists (the number of nested levels)
result_ndim : int
        The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
"""
if depth < max_depth:
shapes, slices, arrays = zip(
*[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
for arr in arrays])
axis = result_ndim - max_depth + depth
shape, slice_prefixes = _concatenate_shapes(shapes, axis)
# Prepend the slice prefix and flatten the slices
slices = [slice_prefix + the_slice
for slice_prefix, inner_slices in zip(slice_prefixes, slices)
for the_slice in inner_slices]
# Flatten the array list
arrays = functools.reduce(operator.add, arrays)
return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
# Return the slice and the array inside a list to be consistent with
# the recursive case.
arr = _atleast_nd(arrays, result_ndim)
return arr.shape, [()], [arr]
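# A small illustration of the recursion above (comment only; shapes are
# illustrative). For arrays = [A, B] with A.shape == (2, 2) and
# B.shape == (2, 3), max_depth == 1 and result_ndim == 2, the bottom level
# returns each array with an empty slice, and the level above concatenates
# along axis = result_ndim - max_depth + depth = 1, giving:
#   shape  == (2, 5)
#   slices == [(slice(0, 2),), (slice(2, 5),)]
#   arrays == [A, B]
# The slices apply to the last axis once prepended with ``(Ellipsis,)``.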
def _block(arrays, max_depth, result_ndim, depth=0):
"""
Internal implementation of block based on repeated concatenation.
`arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
yield from _block_dispatcher(subarrays)
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated (see `concatenate`) along
the last dimension (-1), then these are concatenated along the
second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``np.block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
.. versionadded:: 1.13.0
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third axis).
column_stack : Stack 1-D arrays as columns into a 2-D array.
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
Notes
-----
When called with only scalars, ``np.block`` is equivalent to an ndarray
call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
``np.array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
Examples
--------
The most common use of this function is to build a block matrix
>>> A = np.eye(2) * 2
>>> B = np.eye(3) * 3
>>> np.block([
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[1., 1., 3., 0., 0.],
[1., 1., 0., 3., 0.],
[1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
>>> np.block([1, 2, 3]) # hstack([1, 2, 3])
array([1, 2, 3])
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
array([ 1, 2, 3, 4, 5, 6, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([A, B]) # hstack([A, B])
array([[1, 1, 2, 2],
[1, 1, 2, 2]])
With a list of depth 2, `block` can be used in place of `vstack`:
>>> a = np.array([1, 2, 3])
>>> b = np.array([4, 5, 6])
>>> np.block([[a], [b]]) # vstack([a, b])
array([[1, 2, 3],
[4, 5, 6]])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([[A], [B]]) # vstack([A, B])
array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
It can also be used in places of `atleast_1d` and `atleast_2d`
>>> a = np.array(0)
>>> b = np.array([1])
>>> np.block([a]) # atleast_1d(a)
array([0])
>>> np.block([b]) # atleast_1d(b)
array([1])
>>> np.block([[a]]) # atleast_2d(a)
array([[0]])
>>> np.block([[b]]) # atleast_2d(b)
array([[1]])
"""
arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
# It was found through benchmarking that making an array of final size
# around 256x256 was faster by straight concatenation on an
# i7-7700HQ processor with dual channel 2400MHz RAM.
# It didn't seem to matter heavily on the dtype used.
#
# A 2D array using repeated concatenation requires 2 copies of the array.
#
# The fastest algorithm will depend on the ratio of CPU power to memory
# speed.
# One can monitor the results of the benchmark
# https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
# to tune this parameter until a C version of the `_block_info_recursion`
# algorithm is implemented which would likely be faster than the python
# version.
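# As a rough worked example of the cutoff, using the numbers from the
# comment above (not a benchmark result): a 2x2 grid of 256x256 blocks has
# final_size = 4 * 256 * 256 = 262144 and list_ndim = 2, so
# list_ndim * final_size == 524288 == 2 * 512 * 512 and the concatenation
# path is still taken; anything larger switches to the slicing path below.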
if list_ndim * final_size > (2 * 512 * 512):
return _block_slicing(arrays, list_ndim, result_ndim)
else:
return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
Returns
(`arrays`, list_ndim, result_ndim, final_size)
"""
bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
'List at {} cannot be empty'.format(
_block_format_index(bottom_index)
)
)
result_ndim = max(arr_ndim, list_ndim)
return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
shape, slices, arrays = _block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = _nx.result_type(*[arr.dtype for arr in arrays])
# Test preferring F only in the case that all input arrays are F
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = _nx.empty(shape=shape, dtype=dtype, order=order)
# Note: In a c implementation, the function
# PyArray_CreateMultiSortedStridePerm could be used for more advanced
# guessing of the desired order.
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
def _block_concatenate(arrays, list_ndim, result_ndim):
result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
# This might copy scalars or lists twice, but this isn't a likely
# usecase for those interested in performance
result = result.copy()
return result
| |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.config_parser import *
__all__ = [
'ParamAttr', 'ExtraAttr', 'ParameterAttribute', 'ExtraLayerAttribute'
]
def convert_and_compare(x, Type):
"""
Convert x to be the same type as Type and then convert back to
check whether there is a loss of information
:param x: object to be checked
:param Type: target type to check x over
"""
return type(x)(Type(x)) == x
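# A couple of concrete cases for the round-trip check above (comments only,
# values chosen for illustration):
#   convert_and_compare(3, float)  -> int(float(3)) == 3          -> True
#   convert_and_compare(3.5, int)  -> float(int(3.5)) == 3.0 != 3.5 -> False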
def is_compatible_with(x, Type):
"""
Check if x has a type compatible with Type
:param x: object to be checked
:param Type: target type to check x over
"""
if type(x) == Type:
return True
try:
if float == Type or int == Type:
# avoid types that can be converted to float/int but are not very
# meaningful and could potentially lead to errors,
# i.e., str and bool values should not be used to initialize float/int variables
if not isinstance(x, str) and not isinstance(x, bool):
return convert_and_compare(x, Type)
elif bool == Type:
# should not use string type to initialize bool variable
if not isinstance(x, str):
return convert_and_compare(x, Type)
else:
return False
except:
return False
class ParameterAttribute(object):
"""
Parameter Attributes object. To fine-tune the network training process, users
can set attributes to control training details, such as the l1/l2 regularization
rate, the learning rate, and how to initialize the parameter.
NOTE: IT IS A HIGH LEVEL USER INTERFACE.
:param is_static: True if this parameter will be fixed while training.
:type is_static: bool
:param initial_std: Standard deviation for Gaussian random initialization.
None if Gaussian random initialization is not used.
:type initial_std: float or None
:param initial_mean: Mean for Gaussian random initialization.
None if Gaussian random initialization is not used.
:type initial_mean: float or None
:param initial_max: Uniform initialization max value.
:type initial_max: float or None
:param initial_min: Uniform initialization min value.
:type initial_min: float or None
:param l1_rate: the l1 regularization factor
:type l1_rate: float or None
:param l2_rate: the l2 regularization factor
:type l2_rate: float or None
:param learning_rate: The parameter learning rate. None means 1.
The learning rate when optimize is LEARNING_RATE =
GLOBAL_LEARNING_RATE * PARAMETER_LEARNING_RATE
* SCHEDULER_FACTOR.
:type learning_rate: float or None
:param momentum: The parameter momentum. None means use global value.
:type momentum: float or None
:param gradient_clipping_threshold: Gradient clipping threshold. If a gradient
value is larger than this threshold, it
will be clipped.
:type gradient_clipping_threshold: float
:param sparse_update: Enable sparse update for this parameter. It will
enable both local and remote sparse update.
:type sparse_update: bool
"""
def __init__(self,
name=None,
is_static=False,
initial_std=None,
initial_mean=None,
initial_max=None,
initial_min=None,
l1_rate=None,
l2_rate=None,
learning_rate=None,
momentum=None,
gradient_clipping_threshold=None,
sparse_update=False):
# initialize strategy.
if is_static:
self.attr = {'is_static': True}
elif initial_std is None and initial_mean is None and initial_max \
is None and initial_min is None:
self.attr = {'initial_smart': True}
elif is_compatible_with(initial_std, float) or \
is_compatible_with(initial_mean, float):
self.attr = dict()
if initial_std is not None:
self.attr['initial_std'] = initial_std
if initial_mean is not None:
self.attr['initial_mean'] = initial_mean
self.attr['initial_strategy'] = 0 # Gauss Random
elif is_compatible_with(initial_max, float) and \
is_compatible_with(initial_min, float):
assert initial_min < initial_max
initial_mean = (initial_max + initial_min) / 2
initial_std = initial_mean - initial_min
self.attr = dict()
self.attr['initial_mean'] = initial_mean
self.attr['initial_std'] = initial_std
self.attr['initial_strategy'] = 1 # Uniform Random
else:
raise RuntimeError("Unexpected branch.")
if not is_static and is_compatible_with(l1_rate, float):
self.attr['decay_rate_l1'] = l1_rate
if not is_static and is_compatible_with(l2_rate, float):
self.attr['decay_rate'] = l2_rate
if not is_static and is_compatible_with(learning_rate, float):
self.attr['learning_rate'] = learning_rate
if not is_static and is_compatible_with(momentum, float):
self.attr['momentum'] = momentum
if name is not None:
self.attr['parameter_name'] = name
if sparse_update:
self.attr['sparse_update'] = True
self.attr['sparse_remote_update'] = True
if gradient_clipping_threshold is not None and \
is_compatible_with(gradient_clipping_threshold, float):
self.attr['gradient_clipping_threshold'] = \
gradient_clipping_threshold
def set_default_parameter_name(self, name):
"""
Set the default parameter name. If no parameter name has been set, this
default name will be used.
:param name: default parameter name.
:type name: basestring
"""
if 'parameter_name' not in self.attr:
self.attr['parameter_name'] = name
@staticmethod
def to_bias(bias_attr):
if isinstance(bias_attr, ParameterAttribute):
return Bias(**bias_attr.attr)
else:
return False
class ExtraLayerAttribute(object):
"""
Some high level layer attributes config. You can set all attributes here,
but some layers do not support every attribute. If you set an attribute on a
layer that does not support it, paddle will print an error and core dump.
:param error_clipping_threshold: Error clipping threshold.
:type error_clipping_threshold: float
:param drop_rate: Dropout rate. Dropout will create a mask on layer output.
The dropout rate is the fraction of the mask set to zero. For
details of what dropout is, please refer to `here
<https://www.cs.toronto.edu/~hinton/absps/
JMLRdropout.pdf>`_.
:type drop_rate: float
:param device: device ID of layer. device=-1, use CPU. device>0, use GPU.
The details allocation in parallel_nn please refer to `here
<http://www.paddlepaddle.org/doc/ui/cmd_argument/
use_case.html#case-2-specify-layers-in-different-devices>`_.
:type device: int
"""
def __init__(self,
error_clipping_threshold=None,
drop_rate=None,
device=None):
self.attr = dict()
if isinstance(error_clipping_threshold, float):
assert error_clipping_threshold > 0
self.attr["error_clipping_threshold"] = error_clipping_threshold
if isinstance(drop_rate, float):
assert drop_rate > 0
self.attr["drop_rate"] = drop_rate
if isinstance(device, int):
self.attr["device"] = device
def check(self, layer_name):
for key in self.attr:
if not hasattr(self, 'can_%s' % key) or \
not getattr(self, 'can_%s' % key):
raise NotImplementedError("Layer %s cannot support %s" %
(layer_name, key))
@staticmethod
def to_kwargs(attr):
if attr is None:
return dict()
else:
return attr.attr
ParamAttr = ParameterAttribute
ExtraAttr = ExtraLayerAttribute
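# A minimal usage sketch (comment only; the argument values are illustrative,
# not taken from any particular model config):
#   param = ParamAttr(initial_mean=0.0, initial_std=0.01, l2_rate=8e-4)
#   # -> param.attr == {'initial_mean': 0.0, 'initial_std': 0.01,
#   #                   'initial_strategy': 0, 'decay_rate': 0.0008}
#   extra = ExtraAttr(drop_rate=0.5, device=-1)
#   # -> extra.attr == {'drop_rate': 0.5, 'device': -1}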
| |
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import subprocess
import sys
ios_sim_dest = "-destination 'platform=iOS Simulator,name=iPhone 6,OS=latest'"
ios_sim_flags = "-sdk iphonesimulator CODE_SIGN_IDENTITY=\"\" CODE_SIGNING_REQUIRED=NO"
default_workspace = "ADAL.xcworkspace"
use_xcpretty = True
class tclr:
HDR = '\033[1m'
OK = '\033[32m\033[1m'
FAIL = '\033[31m\033[1m'
WARN = '\033[33m\033[1m'
SKIP = '\033[96m\033[1m'
END = '\033[0m'
build_targets = [
{
"name" : "iOS Framework",
"scheme" : "ADAL",
"operations" : [ "build", "test" ],
"platform" : "iOS",
},
{
"name" : "iOS Test App",
"scheme" : "MyTestiOSApp",
"operations" : [ "build" ],
"platform" : "iOS"
},
{
"name" : "iOS Automation Test App",
"scheme" : "ADALAutomation",
"operations" : [ "build" ],
"platform" : "iOS"
},
{
"name" : "Sample Swift App",
"scheme" : "SampleSwiftApp",
"operations" : [ "build" ],
"platform" : "iOS",
"workspace" : "Samples/SampleSwiftApp/SampleSwiftApp.xcworkspace",
},
{
"name" : "Mac Framework",
"scheme" : "ADAL Mac",
"operations" : [ "build", "test" ],
"platform" : "Mac"
},
{
"name" : "Mac Test App",
"scheme" : "MyTestMacOSApp",
"operations" : [ "build" ],
"platform" : "Mac"
}
]
def print_operation_start(name, operation) :
print tclr.HDR + "Beginning " + name + " [" + operation + "]" + tclr.END
print "travis_fold:start:" + (name + "_" + operation).replace(" ", "_")
def print_operation_end(name, operation, exit_code) :
print "travis_fold:end:" + (name + "_" + operation).replace(" ", "_")
if (exit_code == 0) :
print tclr.OK + name + " [" + operation + "] Succeeded" + tclr.END
else :
print tclr.FAIL + name + " [" + operation + "] Failed" + tclr.END
def do_ios_build(target, operation) :
name = target["name"]
scheme = target["scheme"]
project = target.get("project")
workspace = target.get("workspace")
if (workspace == None) :
workspace = default_workspace
print_operation_start(name, operation)
command = "xcodebuild " + operation
if (project != None) :
command += " -project " + project
else :
command += " -workspace " + workspace
command += " -scheme \"" + scheme + "\" -configuration CodeCoverage " + ios_sim_flags + " " + ios_sim_dest
if (use_xcpretty) :
command += " | xcpretty"
print command
exit_code = subprocess.call("set -o pipefail;" + command, shell = True)
print_operation_end(name, operation, exit_code)
return exit_code
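# For the "iOS Framework" target above, the composed command looks roughly
# like the following (illustrative, wrapping added for readability):
#   xcodebuild build -workspace ADAL.xcworkspace -scheme "ADAL"
#     -configuration CodeCoverage -sdk iphonesimulator
#     CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO
#     -destination 'platform=iOS Simulator,name=iPhone 6,OS=latest' | xcpretty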
def do_mac_build(target, operation) :
arch = target.get("arch")
name = target["name"]
scheme = target["scheme"]
print_operation_start(name, operation)
command = "xcodebuild " + operation + " -workspace " + default_workspace + " -scheme \"" + scheme + "\""
if (arch != None) :
command += " -destination 'arch=" + arch + "'"
if (use_xcpretty) :
command += " | xcpretty"
print command
exit_code = subprocess.call("set -o pipefail;" + command, shell = True)
print_operation_end(name, operation, exit_code)
return exit_code
build_status = dict()
def check_dependencies(target) :
name = target["name"]
dependencies = target.get("dependencies")
if (dependencies == None) :
return True
for dependency in dependencies :
dependency_status = build_status.get(dependency)
if (dependency_status == None) :
print tclr.SKIP + "Skipping " + name + " dependency " + dependency + " not built yet." + tclr.END
build_status[name] = "Skipped"
return False
if (build_status[dependency] != "Succeeded") :
print tclr.SKIP + "Skipping " + name + " dependency " + dependency + " failed." + tclr.END
build_status[name] = "Skipped"
return False
return True
clean = True
for arg in sys.argv :
if (arg == "--no-clean") :
clean = False
if (arg == "--no-xcpretty") :
use_xcpretty = False
# start by cleaning up any derived data that might be lying around
if (clean) :
subprocess.call("rm -rf ~/Library/Developer/Xcode/DerivedData/ADAL-*", shell=True)
subprocess.call("rm -rf ~/Library/Developer/Xcode/DerivedData/SampleSwiftApp-*", shell=True)
for target in build_targets:
exit_code = 0
name = target["name"]
platform = target["platform"]
# If we don't have the dependencies for this target built yet skip it.
if (not check_dependencies(target)) :
continue
for operation in target["operations"] :
if (exit_code != 0) :
break # If one operation fails, then the others are almost certainly going to fail too
if (platform == "iOS") :
exit_code = do_ios_build(target, operation)
elif (platform == "Mac") :
exit_code = do_mac_build(target, operation)
else :
raise Exception('Unrecognized platform type ' + platform)
if (exit_code == 0) :
print tclr.OK + name + " Succeeded" + tclr.END
build_status[name] = "Succeeded"
else :
print tclr.FAIL + name + " Failed" + tclr.END
build_status[name] = "Failed"
final_status = 0
print "\n"
for target in build_targets :
project = target["name"]
status = build_status[project]
if (status == "Failed") :
print tclr.FAIL + project + " failed." + tclr.END
final_status = 1
elif (status == "Skipped") :
print tclr.SKIP + '\033[93m' + project + " skipped." + tclr.END
final_status = 1
elif (status == "Succeeded") :
print tclr.OK + '\033[92m' + project + " succeeded." + tclr.END
else :
raise Exception('Unrecognized status: ' + status)
sys.exit(final_status)
| |
# AUTHOR: Kale Miller
# DESCRIPTION: The 'main brain' of the program is held in here.
# 50726f6772616d6d696e6720697320627265616b696e67206f66206f6e652062696720696d706f737369626c65207461736b20696e746f20736576
# 6572616c207665727920736d616c6c20706f737369626c65207461736b732e
# DEVELOPMENT LOG:
# 07/12/16: Initialized file. Moved IDGenerator class into the script. Added holding bay class.
# 12/12/16: Tweaked the IDGenerator class to help remove dependency.
# 13/12/16: Fleshed out the NewHoldingBay class.
# 15/12/16: Added methods to add auxiliary labels. Added method to generate information label. Small bug fixes.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~IMPORTS/GLOBALS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os, time
import numpy as np
from lib import containers
CONTAINER_CLASSES = [
containers.BasicContainer,
containers.HeavyContainer,
containers.RefrigeratedContainer,
containers.LiquidContainer,
containers.ExplosivesContainer,
containers.ToxicContainer,
containers.ChemicalContainer
]
CONTAINER_TYPES = ['basic', 'heavy', 'refrigerated', 'liquid', 'explosive', 'toxic', 'chemical']
SERIAL_CODES = ['B', 'H', 'R', 'L', 'E', 'T', 'C']
TAG_APPLICATION_TIME = 0.2
PRINTALL_TIME = 1
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MAIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def processshipfile(filename, path):
"""Processes the csv file that the ship supplies."""
def _deletenewline(string):
"""Deletes the \n symbol from a string if it exists."""
try:
truncatedstring = string[:string.index('\n')]
except ValueError:
truncatedstring = string
finally:
return truncatedstring
try:
home = os.getcwd()
os.chdir(path)
except WindowsError: # Would this hold true on all machines?
raise NameError, "The path specified does not exist."
rawfile = open(filename, 'r')
arylines = rawfile.readlines()
basematrix = map(lambda x: _deletenewline(x).split(','), arylines)
numpyarray = np.array(basematrix)
return numpyarray
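# The CSV layout assumed by the rest of this module (inferred from
# NewHoldingBay.defineship below, not from a documented spec): row 0 is a
# header, column 0 holds the container type or its single-letter serial
# code, and columns 1 and 3 are passed through as the container's
# constructor parameters.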
class IDGenerator:
"""Controls the assignment of id tags on the containers."""
# TODO: Change the __init__ such that it works by reading a collection of tuples instead of two lists.
def __init__(self):
"""Initialise the id generator."""
self._COUNTERS = [0] * len(CONTAINER_TYPES)
return
def _findindex(self, container):
"""Determines the index in the lists the class should use."""
return CONTAINER_TYPES.index(container)
def _serialcode(self, index):
"""Fetches the serial code for a supplied index."""
return SERIAL_CODES[index]
def _counter(self, index):
"""Fetches the counter for a specific serial type and increments it by one."""
self._COUNTERS[index] += 1
return self._COUNTERS[index]
def newid(self, containertype):
"""Generates a new id."""
ii = self._findindex(containertype)
idtag = self._serialcode(ii) + str(self._counter(ii)).zfill(5)
return idtag
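# For example (comment only): the first call to newid('heavy') returns
# 'H00001' -- SERIAL_CODES[CONTAINER_TYPES.index('heavy')] is 'H' and the
# counter for that type is incremented to 1 before being zero-padded.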
class NewHoldingBay:
"""Creates a new holding bay for the containers. Thus it contains all of the information about the containers
along with the methods controlling unloading and loading them."""
def __init__(self):
self._path = os.getcwd()
self.idgenerator = IDGenerator()
self.containerlist = list()
self._iOnship = 0
self._iLoaded = 0
self._iHolding = 0
return None
def _createcontainer(self, containerstr, parameters):
"""Creates a new container class based off the first column of the CSV."""
# TODO: Fix this method up to catch more and print useful error messages.
if not isinstance(containerstr, str):
raise TypeError, "The parameter passed must be a string."
elif len(containerstr) == 1:
try:
ii = SERIAL_CODES.index(containerstr)
except ValueError:
raise Exception("Bad input.") # TODO: Fix this area up.
elif len(containerstr) != 1:
try:
ii = CONTAINER_TYPES.index(containerstr)
except ValueError:
raise Exception("Bad input.")
idtag = self.idgenerator.newid(CONTAINER_TYPES[ii])
return CONTAINER_CLASSES[ii](idtag, *parameters)
def defineship(self, file):
"""Pass in the CSV file of the ship in order to unload it."""
shipdata = processshipfile(file, self._path)
shipdata = shipdata[1::] # Throw out the headers.
for line in shipdata:
newcontainer = self._createcontainer(line[0], (line[1], line[3]))
self.containerlist.append(newcontainer)
self._iOnship += 1
def printcontainer(self, serial):
"""Prints the information about a specific container."""
for container in self.containerlist:
if container.id() == serial:
container.information()
return None
else:
continue
raise NameError, "Unable to find container with serial code %s" % serial
return -1
def printallinformation(self):
"""Prints the information of all the containers."""
for container in self.containerlist:
container.information()
time.sleep(PRINTALL_TIME)
return None
def unloadall(self, debug=False):
"""Unloads all of the containers from the ship."""
for container in self.containerlist:
container.unload(debug=debug)
self._iHolding += 1
self._iOnship -= 1
return None
def loadall(self, debug=False):
"""Loads all of the containers into trucks and trains."""
# TODO: Proper loading locations.
ii = 1
for container in self.containerlist:
container.load('Truck ' + str(ii).zfill(3), debug=debug)
self._iHolding -= 1
self._iLoaded += 1
ii += 1
return None
def printauditedload(self):
"""Prints information about the holding bay at this time."""
iOnship = 0; iLoaded = 0; iHolding = 0
iContainercount = [0] * len(CONTAINER_TYPES)
for container in self.containerlist:
try:
ii = CONTAINER_TYPES.index(container._type)
iContainercount[ii] += 1
except ValueError:
raise NameError, "One (or more) containers don't have a valid type."
# Print the appropriate information.
print "----------------------------------------------------------------------"
print "TOTAL CONTAINERS: %i" % len(self.containerlist); time.sleep(0.3)
print "CONTAINERS CURRENTLY STILL ON SHIP: %i" % self._iOnship; time.sleep(0.3)
print "CONTAINERS LOADED ON TRUCKS AND TRAINS: %i" % self._iLoaded; time.sleep(0.3)
print "CONTAINERS BEING HELD IN THE HOLDING BAY: %i" % self._iHolding; time.sleep(0.3)
print ""
print "THE NUMBER OF CONTAINERS FOR EACH TYPE:"; time.sleep(0.3)
for ii in xrange(len(CONTAINER_TYPES)):
if iContainercount[ii] == 0: continue
print "\t%s: %i" % (CONTAINER_TYPES[ii], iContainercount[ii]); time.sleep(0.3)
print "----------------------------------------------------------------------"
return None
def addidtags(self, debug=False):
"""Applys appropriate serial numbers to all of the containers."""
for container in self.containerlist:
print "Applying id tag to container %s" % container.id()
if not debug: time.sleep(TAG_APPLICATION_TIME)
container.addidtag()
return None
def applyauxilarylabels(self):
"""Applys the labels that should go on containers about their contents and handling."""
for container in self.containerlist:
print "Adding labels to container %s" % container.id()
container.addauxilarylabels()
return None
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.:.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| |
# -*- coding: utf-8 -*-
"""Tests for pomodoro service."""
import os
import sys
import time
import unittest
import pytest
from unittest.mock import Mock
from pomito import main, pomodoro, task
from pomito.plugins.ui import UIPlugin
from pomito.plugins.task import TaskPlugin
from pomito.test import PomitoTestFactory
class PomodoroServiceTests(unittest.TestCase):
"""Tests for pomodoro service.
- test_break_stopped_without_start
- test_session_stopped_without_start
- test_interruption_stopped_without_start
- test_get_config_gets_value_for_plugin_and_key
- test_get_config_throws_for_invalid_plugin
- test_get_config_throws_for_invalid_key
- test_get_config_throws_for_invalid_inifile
"""
def setUp(self):
test_factory = PomitoTestFactory()
self.pomodoro_service = test_factory.create_fake_service()
self.dummy_task = Mock(spec=task.Task)
self.dummy_callback = Mock()
def tearDown(self):
self.pomodoro_service._pomito_instance.exit()
def test_current_task_none_for_default_pomodoro(self):
assert self.pomodoro_service.current_task is None
def test_current_task_is_set_for_running_session(self):
self.pomodoro_service.start_session(self.dummy_task)
assert self.pomodoro_service.current_task == self.dummy_task
self.pomodoro_service.stop_session()
def test_current_task_none_after_session_stop(self):
self.pomodoro_service.start_session(self.dummy_task)
self.pomodoro_service.stop_session()
assert self.pomodoro_service.current_task is None
def test_get_config_gets_value_for_plugin_and_key(self):
pass
def test_get_config_returns_none_invalid_plugin(self):
val = self.pomodoro_service.get_config("dummy_plugin", "dummy_key")
assert val is None
def test_get_task_plugins_gets_list_of_all_task_plugins(self):
from pomito import plugins
plugins.PLUGINS = {'a': plugins.task.nulltask.NullTask(None),
'b': self.pomodoro_service}
task_plugins = self.pomodoro_service.get_task_plugins()
assert task_plugins == [plugins.PLUGINS['a']]
def test_get_tasks_returns_tasks_for_the_user(self):
self.pomodoro_service.get_tasks()
self.pomodoro_service \
._pomito_instance \
.task_plugin.get_tasks.assert_called_once_with()
def test_get_tasks_by_filter_returns_tasks_match_filter(self):
self.pomodoro_service.get_tasks_by_filter("dummy_filter")
self.pomodoro_service \
._pomito_instance \
.task_plugin.get_tasks_by_filter \
.assert_called_once_with("dummy_filter")
def test_get_task_by_id_returns_task_matching_task_idish(self):
self.pomodoro_service.get_task_by_id(10)
self.pomodoro_service \
._pomito_instance \
.task_plugin.get_task_by_id \
.assert_called_once_with(10)
def test_start_session_throws_if_no_task_is_provided(self):
self.assertRaises(Exception, self.pomodoro_service.start_session, None)
def test_stop_session_waits_for_timer_thread_to_join(self):
self.pomodoro_service.start_session(self.dummy_task)
assert self.pomodoro_service._timer.is_alive()
self.pomodoro_service.stop_session()
assert self.pomodoro_service._timer.is_alive() is False
def test_stop_break_waits_for_timer_thread_to_join(self):
self.pomodoro_service.start_break()
assert self.pomodoro_service._timer.is_alive()
self.pomodoro_service.stop_break()
assert self.pomodoro_service._timer.is_alive() is False
def test_session_started_is_called_with_correct_session_count(self):
self.pomodoro_service.signal_session_started \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_session(self.dummy_task)
self.dummy_callback.assert_called_once_with(None,
session_count=0,
session_duration=600,
task=self.dummy_task)
self.pomodoro_service.signal_session_started \
.disconnect(self.dummy_callback)
self.pomodoro_service.stop_session()
def test_session_stopped_for_reason_interrupt(self):
self.pomodoro_service.signal_session_stopped \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_session(self.dummy_task)
self.pomodoro_service.stop_session()
self.dummy_callback.\
assert_called_once_with(None, session_count=0,
task=self.dummy_task,
reason=pomodoro.TimerChange.INTERRUPT)
self.pomodoro_service.signal_session_stopped \
.disconnect(self.dummy_callback)
def test_session_stopped_for_reason_complete(self):
self.pomodoro_service.signal_session_stopped \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_session(self.dummy_task)
self.pomodoro_service._timer.trigger_callback(pomodoro.TimerChange.COMPLETE)
self.dummy_callback.assert_called_once_with(None, session_count=1,
task=self.dummy_task,
reason=pomodoro.TimerChange.COMPLETE)
self.pomodoro_service.signal_session_stopped\
.disconnect(self.dummy_callback)
def test_break_started_shortbreak(self):
self._test_break_started(pomodoro.TimerType.SHORT_BREAK, 120)
def test_break_started_longbreak(self):
self.pomodoro_service._session_count = 4
self._test_break_started(pomodoro.TimerType.LONG_BREAK, 300)
def _test_break_started(self, break_type, duration):
self.pomodoro_service.signal_break_started \
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.dummy_callback\
.assert_called_once_with(None,
break_type=break_type,
break_duration=duration)
self.pomodoro_service.stop_break()
self.pomodoro_service.signal_break_started \
.disconnect(self.dummy_callback)
def test_break_stopped_shortbreak_for_reason_complete(self):
self.pomodoro_service.signal_break_stopped\
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.pomodoro_service._timer.trigger_callback(pomodoro.TimerChange.COMPLETE)
self.dummy_callback.assert_called_once_with(None,
break_type=pomodoro.TimerType.SHORT_BREAK,
reason=pomodoro.TimerChange.COMPLETE)
self.pomodoro_service.signal_break_stopped\
.disconnect(self.dummy_callback)
def test_break_stopped_shortbreak_for_reason_interrupt(self):
self.pomodoro_service.signal_break_stopped\
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.pomodoro_service.stop_break()
self.dummy_callback.assert_called_once_with(None,
break_type=pomodoro.TimerType.SHORT_BREAK,
reason=pomodoro.TimerChange.INTERRUPT)
self.pomodoro_service.signal_break_stopped\
.disconnect(self.dummy_callback)
def test_break_stopped_longbreak_for_interrupt(self):
self.pomodoro_service._session_count = 4
self.pomodoro_service.signal_break_stopped\
.connect(self.dummy_callback, weak=False)
self.pomodoro_service.start_break()
self.pomodoro_service.stop_break()
self.dummy_callback.assert_called_once_with(None,
break_type=pomodoro.TimerType.LONG_BREAK,
reason=pomodoro.TimerChange.INTERRUPT)
self.pomodoro_service.signal_break_stopped\
.disconnect(self.dummy_callback)
def test_get_data_dir_returns_correct_default(self):
expected_data_dir = os.path.join(os.path.expanduser("~"), "pomito")
if sys.platform.startswith("linux"):
home_dir = os.getenv("HOME")
alt_data_dir = os.path.join(home_dir, ".local/share")
expected_data_dir = os.path\
.join(os.getenv("XDG_DATA_HOME") or alt_data_dir, "pomito")
data_dir = self.pomodoro_service.get_data_dir()
assert data_dir == expected_data_dir
def test_get_db_returns_a_valid_database(self):
test_db = "dummy_db"
pomodoro_service = pomodoro.Pomodoro(main.Pomito(database=test_db))
assert pomodoro_service.get_db() == test_db
@pytest.mark.perf
def test_session_started_perf(self):
t = Mock(spec=task.Task)
pomito = main.Pomito(None)
pomito.ui_plugin = DummyUIPlugin()
pomito.task_plugin = Mock(spec=TaskPlugin)
pomito._message_dispatcher.start()
pomito.pomodoro_service.signal_session_started \
.connect(pomito.ui_plugin.notify_session_started, weak=False)
time_start = time.time() # initial timestamp
pomito.pomodoro_service.start_session(t)
time.sleep(1)
time_end = pomito.ui_plugin.timestamp
self.assertAlmostEqual(time_start, time_end, delta=0.1)
pomito.ui_plugin.timestamp = None
pomito.pomodoro_service.stop_session()
pomito.exit()
class TimerTests(unittest.TestCase):
def setUp(self):
self.timestamp_start = 0.0
self.timestamp_end = 0.0
self.delta = 0.0
self.mock_callback = Mock()
def tearDown(self):
self.timestamp_start = self.timestamp_end = self.delta = 0.0
def dummy_callback(self, reason='whatever'):
self.timestamp_end = time.time()
self.delta += (self.timestamp_end - self.timestamp_start)
self.timestamp_start = self.timestamp_end
self.reason = reason
def test_mock_callback_reason_increment_and_complete(self):
timer = pomodoro.Timer(0.2, self.mock_callback, 0.1)
timer.start()
time.sleep(0.3)
assert self.mock_callback.call_count == 2
self.assertListEqual(self.mock_callback.call_args_list,
[((pomodoro.TimerChange.INCREMENT,), {}), ((pomodoro.TimerChange.COMPLETE,), {})],
'invalid notify_reason')
def test_mock_callback_reason_interrupt(self):
timer = pomodoro.Timer(10, self.mock_callback, 1)
timer.start()
timer.stop()
time.sleep(0.1)
assert self.mock_callback.call_count == 1
self.assertListEqual(self.mock_callback.call_args_list,
[((pomodoro.TimerChange.INTERRUPT,), {})],
'invalid notify_reason')
def test_start_throws_when_called_on_same_thread(self):
def callback_with_catch(reason):
try:
timer.start()
assert False # expect previous call to throw
except RuntimeError:
pass
timer = pomodoro.Timer(10, callback_with_catch, 1)
timer.start()
timer.stop()
time.sleep(0.1)
def test_stop_throws_when_called_on_same_thread(self):
def callback_with_catch(reason):
try:
timer.stop()
assert False # expect previous call to throw
except RuntimeError:
pass
timer = pomodoro.Timer(10, callback_with_catch, 1)
timer.start()
timer.stop()
time.sleep(0.1)
@pytest.mark.perf
def test_callback_granular(self):
duration = 60.00
delta_granular = 1.0 # windows
if sys.platform.startswith("linux"):
delta_granular = 0.03
timer = pomodoro.Timer(duration, self.dummy_callback)
self.timestamp_start = time.time()
timer.start()
time.sleep(duration + 2)
assert self.reason == pomodoro.TimerChange.COMPLETE
self.assertAlmostEqual(self.delta, duration, delta=delta_granular)
class DummyUIPlugin(UIPlugin):
def __init__(self):
"""Create an instance of dummy plugin."""
self.timestamp = 100.0
return
def run(self):
pass
def notify_session_started(self, sender, **kwargs):
self.timestamp = time.time()
return
def initialize(self):
pass
| |
"""Defines parsing functions used by isort for parsing import definitions"""
from collections import OrderedDict, defaultdict
from functools import partial
from itertools import chain
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Tuple
from warnings import warn
from . import place
from .comments import parse as parse_comments
from .deprecated.finders import FindersManager
from .settings import DEFAULT_CONFIG, Config
if TYPE_CHECKING:
from mypy_extensions import TypedDict
CommentsAboveDict = TypedDict(
"CommentsAboveDict", {"straight": Dict[str, Any], "from": Dict[str, Any]}
)
CommentsDict = TypedDict(
"CommentsDict",
{
"from": Dict[str, Any],
"straight": Dict[str, Any],
"nested": Dict[str, Any],
"above": CommentsAboveDict,
},
)
def _infer_line_separator(contents: str) -> str:
if "\r\n" in contents:
return "\r\n"
elif "\r" in contents:
return "\r"
else:
return "\n"
def _normalize_line(raw_line: str) -> Tuple[str, str]:
"""Normalizes import related statements in the provided line.
Returns (normalized_line: str, raw_line: str)
"""
line = raw_line.replace("from.import ", "from . import ")
line = line.replace("from.cimport ", "from . cimport ")
line = line.replace("import*", "import *")
line = line.replace(" .import ", " . import ")
line = line.replace(" .cimport ", " . cimport ")
line = line.replace("\t", " ")
return (line, raw_line)
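# A couple of concrete normalizations (comments only):
#   _normalize_line("from.import x") -> ("from . import x", "from.import x")
#   _normalize_line("import*")       -> ("import *", "import*")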
def import_type(line: str, config: Config = DEFAULT_CONFIG) -> Optional[str]:
"""If the current line is an import line it will return its type (from or straight)"""
if config.honor_noqa and line.lower().rstrip().endswith("noqa"):
return None
elif "isort:skip" in line or "isort: skip" in line or "isort: split" in line:
return None
elif line.startswith(("import ", "cimport ")):
return "straight"
elif line.startswith("from "):
return "from"
return None
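# For instance (comments only):
#   import_type("import os")            -> "straight"
#   import_type("from sys import path") -> "from"
#   import_type("import os  # noqa")    -> None when config.honor_noqa is set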
def _strip_syntax(import_string: str) -> str:
import_string = import_string.replace("_import", "[[i]]")
import_string = import_string.replace("_cimport", "[[ci]]")
for remove_syntax in ["\\", "(", ")", ","]:
import_string = import_string.replace(remove_syntax, " ")
import_list = import_string.split()
for key in ("from", "import", "cimport"):
if key in import_list:
import_list.remove(key)
import_string = " ".join(import_list)
import_string = import_string.replace("[[i]]", "_import")
import_string = import_string.replace("[[ci]]", "_cimport")
return import_string.replace("{ ", "{|").replace(" }", "|}")
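# For example (comment only):
#   _strip_syntax("from a import (b, c)") -> "a b c"
# The "from"/"import" keywords, parentheses and commas are dropped, while a
# literal "_import" substring in a name survives via the [[i]] placeholder
# round-trip.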
def skip_line(
line: str,
in_quote: str,
index: int,
section_comments: Tuple[str, ...],
needs_import: bool = True,
) -> Tuple[bool, str]:
"""Determine if a given line should be skipped.
Returns a tuple containing:
(skip_line: bool,
in_quote: str,)
"""
should_skip = bool(in_quote)
if '"' in line or "'" in line:
char_index = 0
while char_index < len(line):
if line[char_index] == "\\":
char_index += 1
elif in_quote:
if line[char_index : char_index + len(in_quote)] == in_quote:
in_quote = ""
elif line[char_index] in ("'", '"'):
long_quote = line[char_index : char_index + 3]
if long_quote in ('"""', "'''"):
in_quote = long_quote
char_index += 2
else:
in_quote = line[char_index]
elif line[char_index] == "#":
break
char_index += 1
if ";" in line.split("#")[0] and needs_import:
for part in (part.strip() for part in line.split(";")):
if (
part
and not part.startswith("from ")
and not part.startswith(("import ", "cimport "))
):
should_skip = True
return (bool(should_skip or in_quote), in_quote)
class ParsedContent(NamedTuple):
in_lines: List[str]
lines_without_imports: List[str]
import_index: int
place_imports: Dict[str, List[str]]
import_placements: Dict[str, str]
as_map: Dict[str, Dict[str, List[str]]]
imports: Dict[str, Dict[str, Any]]
categorized_comments: "CommentsDict"
change_count: int
original_line_count: int
line_separator: str
sections: Any
verbose_output: List[str]
def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedContent:
"""Parses a python file taking out and categorizing imports."""
line_separator: str = config.line_ending or _infer_line_separator(contents)
in_lines = contents.splitlines()
if contents and contents[-1] in ("\n", "\r"):
in_lines.append("")
out_lines = []
original_line_count = len(in_lines)
if config.old_finders:
finder = FindersManager(config=config).find
else:
finder = partial(place.module, config=config)
line_count = len(in_lines)
place_imports: Dict[str, List[str]] = {}
import_placements: Dict[str, str] = {}
as_map: Dict[str, Dict[str, List[str]]] = {
"straight": defaultdict(list),
"from": defaultdict(list),
}
imports: OrderedDict[str, Dict[str, Any]] = OrderedDict()
verbose_output: List[str] = []
for section in chain(config.sections, config.forced_separate):
imports[section] = {"straight": OrderedDict(), "from": OrderedDict()}
categorized_comments: CommentsDict = {
"from": {},
"straight": {},
"nested": {},
"above": {"straight": {}, "from": {}},
}
index = 0
import_index = -1
in_quote = ""
while index < line_count:
line = in_lines[index]
index += 1
statement_index = index
(skipping_line, in_quote) = skip_line(
line, in_quote=in_quote, index=index, section_comments=config.section_comments
)
if line in config.section_comments and not skipping_line:
if import_index == -1:
import_index = index - 1
continue
if "isort:imports-" in line and line.startswith("#"):
section = line.split("isort:imports-")[-1].split()[0].upper()
place_imports[section] = []
import_placements[line] = section
elif "isort: imports-" in line and line.startswith("#"):
section = line.split("isort: imports-")[-1].split()[0].upper()
place_imports[section] = []
import_placements[line] = section
if skipping_line:
out_lines.append(line)
continue
lstripped_line = line.lstrip()
if (
config.float_to_top
and import_index == -1
and line
and not in_quote
and not lstripped_line.startswith("#")
and not lstripped_line.startswith("'''")
and not lstripped_line.startswith('"""')
):
if not lstripped_line.startswith("import") and not lstripped_line.startswith("from"):
import_index = index - 1
while import_index and not in_lines[import_index - 1]:
import_index -= 1
else:
commentless = line.split("#", 1)[0].strip()
if (
("isort:skip" in line or "isort: skip" in line)
and "(" in commentless
and ")" not in commentless
):
import_index = index
starting_line = line
while "isort:skip" in starting_line or "isort: skip" in starting_line:
commentless = starting_line.split("#", 1)[0]
if (
"(" in commentless
and not commentless.rstrip().endswith(")")
and import_index < line_count
):
while import_index < line_count and not commentless.rstrip().endswith(
")"
):
commentless = in_lines[import_index].split("#", 1)[0]
import_index += 1
else:
import_index += 1
if import_index >= line_count:
break
else:
starting_line = in_lines[import_index]
line, *end_of_line_comment = line.split("#", 1)
if ";" in line:
statements = [line.strip() for line in line.split(";")]
else:
statements = [line]
if end_of_line_comment:
statements[-1] = f"{statements[-1]}#{end_of_line_comment[0]}"
for statement in statements:
line, raw_line = _normalize_line(statement)
type_of_import = import_type(line, config) or ""
if not type_of_import:
out_lines.append(raw_line)
continue
if import_index == -1:
import_index = index - 1
nested_comments = {}
import_string, comment = parse_comments(line)
comments = [comment] if comment else []
line_parts = [part for part in _strip_syntax(import_string).strip().split(" ") if part]
if type_of_import == "from" and len(line_parts) == 2 and comments:
nested_comments[line_parts[-1]] = comments[0]
if "(" in line.split("#", 1)[0] and index < line_count:
while not line.split("#")[0].strip().endswith(")") and index < line_count:
line, new_comment = parse_comments(in_lines[index])
index += 1
if new_comment:
comments.append(new_comment)
stripped_line = _strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
import_string += line_separator + line
else:
while line.strip().endswith("\\"):
line, new_comment = parse_comments(in_lines[index])
index += 1
if new_comment:
comments.append(new_comment)
# Still need to check for parentheses after an escaped line
if (
"(" in line.split("#")[0]
and ")" not in line.split("#")[0]
and index < line_count
):
stripped_line = _strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
import_string += line_separator + line
while not line.split("#")[0].strip().endswith(")") and index < line_count:
line, new_comment = parse_comments(in_lines[index])
index += 1
if new_comment:
comments.append(new_comment)
stripped_line = _strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
import_string += line_separator + line
stripped_line = _strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
if import_string.strip().endswith(
(" import", " cimport")
) or line.strip().startswith(("import ", "cimport ")):
import_string += line_separator + line
else:
import_string = import_string.rstrip().rstrip("\\") + " " + line.lstrip()
if type_of_import == "from":
cimports: bool
import_string = (
import_string.replace("import(", "import (")
.replace("\\", " ")
.replace("\n", " ")
)
if " cimport " in import_string:
parts = import_string.split(" cimport ")
cimports = True
else:
parts = import_string.split(" import ")
cimports = False
from_import = parts[0].split(" ")
import_string = (" cimport " if cimports else " import ").join(
[from_import[0] + " " + "".join(from_import[1:])] + parts[1:]
)
just_imports = [
item.replace("{|", "{ ").replace("|}", " }")
for item in _strip_syntax(import_string).split()
]
attach_comments_to: Optional[List[Any]] = None
direct_imports = just_imports[1:]
straight_import = True
if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports):
straight_import = False
while "as" in just_imports:
nested_module = None
as_index = just_imports.index("as")
if type_of_import == "from":
nested_module = just_imports[as_index - 1]
top_level_module = just_imports[0]
module = top_level_module + "." + nested_module
as_name = just_imports[as_index + 1]
direct_imports.remove(nested_module)
direct_imports.remove(as_name)
direct_imports.remove("as")
if nested_module == as_name and config.remove_redundant_aliases:
pass
elif as_name not in as_map["from"][module]:
as_map["from"][module].append(as_name)
full_name = f"{nested_module} as {as_name}"
associated_comment = nested_comments.get(full_name)
if associated_comment:
categorized_comments["nested"].setdefault(top_level_module, {})[
full_name
] = associated_comment
if associated_comment in comments:
comments.pop(comments.index(associated_comment))
else:
module = just_imports[as_index - 1]
as_name = just_imports[as_index + 1]
if module == as_name and config.remove_redundant_aliases:
pass
elif as_name not in as_map["straight"][module]:
as_map["straight"][module].append(as_name)
if comments and attach_comments_to is None:
if nested_module and config.combine_as_imports:
attach_comments_to = categorized_comments["from"].setdefault(
f"{top_level_module}.__combined_as__", []
)
else:
attach_comments_to = categorized_comments["straight"].setdefault(
module, []
)
del just_imports[as_index : as_index + 2]
if type_of_import == "from":
import_from = just_imports.pop(0)
placed_module = finder(import_from)
if config.verbose and not config.only_modified:
print(f"from-type place_module for {import_from} returned {placed_module}")
elif config.verbose:
verbose_output.append(
f"from-type place_module for {import_from} returned {placed_module}"
)
if placed_module == "":
warn(
f"could not place module {import_from} of line {line} --"
" Do you need to define a default section?"
)
root = imports[placed_module][type_of_import] # type: ignore
for import_name in just_imports:
associated_comment = nested_comments.get(import_name)
if associated_comment:
categorized_comments["nested"].setdefault(import_from, {})[
import_name
] = associated_comment
if associated_comment in comments:
comments.pop(comments.index(associated_comment))
if comments and attach_comments_to is None:
attach_comments_to = categorized_comments["from"].setdefault(import_from, [])
if len(out_lines) > max(import_index, 1) - 1:
last = out_lines and out_lines[-1].rstrip() or ""
while (
last.startswith("#")
and not last.endswith('"""')
and not last.endswith("'''")
and "isort:imports-" not in last
and "isort: imports-" not in last
and not config.treat_all_comments_as_code
and not last.strip() in config.treat_comments_as_code
):
categorized_comments["above"]["from"].setdefault(import_from, []).insert(
0, out_lines.pop(-1)
)
if out_lines:
last = out_lines[-1].rstrip()
else:
last = ""
if statement_index - 1 == import_index: # pragma: no cover
import_index -= len(
categorized_comments["above"]["from"].get(import_from, [])
)
if import_from not in root:
root[import_from] = OrderedDict(
(module, module in direct_imports) for module in just_imports
)
else:
root[import_from].update(
(module, root[import_from].get(module, False) or module in direct_imports)
for module in just_imports
)
if comments and attach_comments_to is not None:
attach_comments_to.extend(comments)
else:
if comments and attach_comments_to is not None:
attach_comments_to.extend(comments)
comments = []
for module in just_imports:
if comments:
categorized_comments["straight"][module] = comments
comments = []
if len(out_lines) > max(import_index, +1, 1) - 1:
last = out_lines and out_lines[-1].rstrip() or ""
while (
last.startswith("#")
and not last.endswith('"""')
and not last.endswith("'''")
and "isort:imports-" not in last
and "isort: imports-" not in last
and not config.treat_all_comments_as_code
and not last.strip() in config.treat_comments_as_code
):
categorized_comments["above"]["straight"].setdefault(module, []).insert(
0, out_lines.pop(-1)
)
if out_lines:
last = out_lines[-1].rstrip()
else:
last = ""
if index - 1 == import_index:
import_index -= len(
categorized_comments["above"]["straight"].get(module, [])
)
placed_module = finder(module)
if config.verbose and not config.only_modified:
print(f"else-type place_module for {module} returned {placed_module}")
elif config.verbose:
verbose_output.append(
f"else-type place_module for {module} returned {placed_module}"
)
if placed_module == "":
warn(
f"could not place module {module} of line {line} --"
" Do you need to define a default section?"
)
imports.setdefault("", {"straight": OrderedDict(), "from": OrderedDict()})
straight_import |= imports[placed_module][type_of_import].get( # type: ignore
module, False
)
imports[placed_module][type_of_import][module] = straight_import # type: ignore
change_count = len(out_lines) - original_line_count
return ParsedContent(
in_lines=in_lines,
lines_without_imports=out_lines,
import_index=import_index,
place_imports=place_imports,
import_placements=import_placements,
as_map=as_map,
imports=imports,
categorized_comments=categorized_comments,
change_count=change_count,
original_line_count=original_line_count,
line_separator=line_separator,
sections=config.sections,
verbose_output=verbose_output,
)
| |
import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_string(path)()
def _get_backends(return_tuples=False):
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
backends.append((backend, backend_path) if return_tuples else backend)
if not backends:
raise ImproperlyConfigured(
'No authentication backends have been defined. Does '
'AUTHENTICATION_BACKENDS contain anything?'
)
return backends
def get_backends():
return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
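# For example (comment only):
#   _clean_credentials({'username': 'bob', 'password': 'hunter2'})
#   -> {'username': 'bob', 'password': CLEANSED_SUBSTITUTE}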
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend, backend_path in _get_backends(return_tuples=True):
try:
inspect.getcallargs(backend.authenticate, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(**credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
return None
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
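# Typical call from view code (comment only; the credential keywords depend
# on the configured backends -- ModelBackend expects username/password):
#   user = authenticate(username='bob', password='secret')
#   if user is not None:
#       login(request, user)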
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
request.session.get(HASH_SESSION_KEY) != session_auth_hash):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = user.backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
language = request.session.get(LANGUAGE_SESSION_KEY)
request.session.flush()
if language is not None:
request.session[LANGUAGE_SESSION_KEY] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Returns the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
user = None
try:
user_id = _get_user_session_key(request)
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id)
# Verify the session
if ('django.contrib.auth.middleware.SessionAuthenticationMiddleware'
in settings.MIDDLEWARE_CLASSES and hasattr(user, 'get_session_auth_hash')):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or AnonymousUser()
def get_permission_codename(action, opts):
"""
Returns the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
def update_session_auth_hash(request, user):
"""
Updating a user's password logs out all sessions for the user if
django.contrib.auth.middleware.SessionAuthenticationMiddleware is enabled.
This function takes the current request and the updated user object from
which the new session hash will be derived and updates the session hash
appropriately to prevent a password change from logging out the session
from which the password was changed.
"""
if hasattr(user, 'get_session_auth_hash') and request.user == user:
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
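# Hedged usage sketch (not part of Django itself): keeping the current session
# valid across a password change by re-deriving the session hash. The view
# shape and the 'new_password' argument are assumptions for illustration.
def _example_change_password(request, new_password):
    user = request.user
    user.set_password(new_password)  # standard user-model API
    user.save()
    # Without this call the stored session hash would no longer match and the
    # session-verification middleware would log this session out as well.
    update_session_auth_hash(request, user)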
default_app_config = 'django.contrib.auth.apps.AuthConfig'
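# Hedged usage sketch (not part of Django itself): how authenticate(), login()
# and logout() above are typically combined in a view. The credential field
# names are hypothetical; real views normally go through a form.
def _example_login_view(request):
    user = authenticate(username=request.POST.get('username'),
                        password=request.POST.get('password'))
    if user is None:
        # Every backend rejected the credentials; user_login_failed has fired.
        return None
    # Persist the user id and backend in the session and rotate the CSRF token.
    login(request, user)
    return user
def _example_logout_view(request):
    # Flushes the session (keeping only the language choice) and installs an
    # AnonymousUser on the request.
    logout(request)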
| |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extended protorpc descriptors.
This takes existing protorpc Descriptor classes and adds extra
properties not directly supported in proto itself, notably field and
message descriptions. We need this in order to generate protorpc
message files with comments.
Note that for most of these classes, we can't simply wrap the existing
message, since we need to change the type of the subfields. We could
have a "plain" descriptor attached, but that seems like unnecessary
bookkeeping. Where possible, we purposely reuse existing tag numbers;
for new fields, we start numbering at 100.
"""
import abc
import operator
import textwrap
import six
from apitools.base.protorpclite import descriptor as protorpc_descriptor
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.py import extra_types
class ExtendedEnumValueDescriptor(messages.Message):
"""Enum value descriptor with additional fields.
Fields:
name: Name of enumeration value.
number: Number of enumeration value.
description: Description of this enum value.
"""
name = messages.StringField(1)
number = messages.IntegerField(2, variant=messages.Variant.INT32)
description = messages.StringField(100)
class ExtendedEnumDescriptor(messages.Message):
"""Enum class descriptor with additional fields.
Fields:
name: Name of Enum without any qualification.
values: Values defined by Enum class.
description: Description of this enum class.
full_name: Fully qualified name of this enum class.
enum_mappings: Mappings from python to JSON names for enum values.
"""
class JsonEnumMapping(messages.Message):
"""Mapping from a python name to the wire name for an enum."""
python_name = messages.StringField(1)
json_name = messages.StringField(2)
name = messages.StringField(1)
values = messages.MessageField(
ExtendedEnumValueDescriptor, 2, repeated=True)
description = messages.StringField(100)
full_name = messages.StringField(101)
enum_mappings = messages.MessageField(
'JsonEnumMapping', 102, repeated=True)
class ExtendedFieldDescriptor(messages.Message):
"""Field descriptor with additional fields.
Fields:
field_descriptor: The underlying field descriptor.
name: The name of this field.
description: Description of this field.
"""
field_descriptor = messages.MessageField(
protorpc_descriptor.FieldDescriptor, 100)
# We duplicate the names for easier bookkeeping.
name = messages.StringField(101)
description = messages.StringField(102)
class ExtendedMessageDescriptor(messages.Message):
"""Message descriptor with additional fields.
Fields:
name: Name of Message without any qualification.
fields: Fields defined for message.
message_types: Nested Message classes defined on message.
enum_types: Nested Enum classes defined on message.
description: Description of this message.
      full_name: Fully qualified name of this message.
decorators: Decorators to include in the definition when printing.
Printed in the given order from top to bottom (so the last entry
is the innermost decorator).
alias_for: This type is just an alias for the named type.
field_mappings: Mappings from python to json field names.
"""
class JsonFieldMapping(messages.Message):
"""Mapping from a python name to the wire name for a field."""
python_name = messages.StringField(1)
json_name = messages.StringField(2)
name = messages.StringField(1)
fields = messages.MessageField(ExtendedFieldDescriptor, 2, repeated=True)
message_types = messages.MessageField(
'extended_descriptor.ExtendedMessageDescriptor', 3, repeated=True)
enum_types = messages.MessageField(
ExtendedEnumDescriptor, 4, repeated=True)
description = messages.StringField(100)
full_name = messages.StringField(101)
decorators = messages.StringField(102, repeated=True)
alias_for = messages.StringField(103)
field_mappings = messages.MessageField(
'JsonFieldMapping', 104, repeated=True)
class ExtendedFileDescriptor(messages.Message):
"""File descriptor with additional fields.
Fields:
package: Fully qualified name of package that definitions belong to.
message_types: Message definitions contained in file.
enum_types: Enum definitions contained in file.
description: Description of this file.
additional_imports: Extra imports used in this package.
"""
package = messages.StringField(2)
message_types = messages.MessageField(
ExtendedMessageDescriptor, 4, repeated=True)
enum_types = messages.MessageField(
ExtendedEnumDescriptor, 5, repeated=True)
description = messages.StringField(100)
additional_imports = messages.StringField(101, repeated=True)
def _WriteFile(file_descriptor, package, version, proto_printer):
"""Write the given extended file descriptor to the printer."""
proto_printer.PrintPreamble(package, version, file_descriptor)
_PrintEnums(proto_printer, file_descriptor.enum_types)
_PrintMessages(proto_printer, file_descriptor.message_types)
custom_json_mappings = _FetchCustomMappings(file_descriptor.enum_types)
custom_json_mappings.extend(
_FetchCustomMappings(file_descriptor.message_types))
for mapping in custom_json_mappings:
proto_printer.PrintCustomJsonMapping(mapping)
def WriteMessagesFile(file_descriptor, package, version, printer):
"""Write the given extended file descriptor to out as a message file."""
_WriteFile(file_descriptor, package, version,
_Proto2Printer(printer))
def WritePythonFile(file_descriptor, package, version, printer):
"""Write the given extended file descriptor to out."""
_WriteFile(file_descriptor, package, version,
_ProtoRpcPrinter(printer))
def PrintIndentedDescriptions(printer, ls, name, prefix=''):
if ls:
with printer.Indent(indent=prefix):
with printer.CommentContext():
width = printer.CalculateWidth() - len(prefix)
printer()
printer(name + ':')
for x in ls:
description = '%s: %s' % (x.name, x.description)
for line in textwrap.wrap(description, width,
initial_indent=' ',
subsequent_indent=' '):
printer(line)
def _FetchCustomMappings(descriptor_ls):
"""Find and return all custom mappings for descriptors in descriptor_ls."""
custom_mappings = []
for descriptor in descriptor_ls:
if isinstance(descriptor, ExtendedEnumDescriptor):
custom_mappings.extend(
_FormatCustomJsonMapping('Enum', m, descriptor)
for m in descriptor.enum_mappings)
elif isinstance(descriptor, ExtendedMessageDescriptor):
custom_mappings.extend(
_FormatCustomJsonMapping('Field', m, descriptor)
for m in descriptor.field_mappings)
custom_mappings.extend(
_FetchCustomMappings(descriptor.enum_types))
custom_mappings.extend(
_FetchCustomMappings(descriptor.message_types))
return custom_mappings
def _FormatCustomJsonMapping(mapping_type, mapping, descriptor):
return '\n'.join((
'encoding.AddCustomJson%sMapping(' % mapping_type,
" %s, '%s', '%s')" % (descriptor.full_name, mapping.python_name,
mapping.json_name),
))
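# Illustrative output of the helper above (descriptor and mapping names are
# invented): for mapping_type='Field', full_name='ExampleMessage',
# python_name='foo_bar' and json_name='fooBar' it returns the two-line string
#   encoding.AddCustomJsonFieldMapping(
#       ExampleMessage, 'foo_bar', 'fooBar')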
def _EmptyMessage(message_type):
return not any((message_type.enum_types,
message_type.message_types,
message_type.fields))
class ProtoPrinter(six.with_metaclass(abc.ABCMeta, object)):
"""Interface for proto printers."""
@abc.abstractmethod
def PrintPreamble(self, package, version, file_descriptor):
"""Print the file docstring and import lines."""
@abc.abstractmethod
def PrintEnum(self, enum_type):
"""Print the given enum declaration."""
@abc.abstractmethod
def PrintMessage(self, message_type):
"""Print the given message declaration."""
class _Proto2Printer(ProtoPrinter):
"""Printer for proto2 definitions."""
def __init__(self, printer):
self.__printer = printer
def __PrintEnumCommentLines(self, enum_type):
description = enum_type.description or '%s enum type.' % enum_type.name
for line in textwrap.wrap(description,
self.__printer.CalculateWidth() - 3):
self.__printer('// %s', line)
PrintIndentedDescriptions(self.__printer, enum_type.values, 'Values',
prefix='// ')
def __PrintEnumValueCommentLines(self, enum_value):
if enum_value.description:
width = self.__printer.CalculateWidth() - 3
for line in textwrap.wrap(enum_value.description, width):
self.__printer('// %s', line)
def PrintEnum(self, enum_type):
self.__PrintEnumCommentLines(enum_type)
self.__printer('enum %s {', enum_type.name)
with self.__printer.Indent():
enum_values = sorted(
enum_type.values, key=operator.attrgetter('number'))
for enum_value in enum_values:
self.__printer()
self.__PrintEnumValueCommentLines(enum_value)
self.__printer('%s = %s;', enum_value.name, enum_value.number)
self.__printer('}')
self.__printer()
def PrintPreamble(self, package, version, file_descriptor):
self.__printer('// Generated message classes for %s version %s.',
package, version)
self.__printer('// NOTE: This file is autogenerated and should not be '
'edited by hand.')
description_lines = textwrap.wrap(file_descriptor.description, 75)
if description_lines:
self.__printer('//')
for line in description_lines:
self.__printer('// %s', line)
self.__printer()
self.__printer('syntax = "proto2";')
self.__printer('package %s;', file_descriptor.package)
def __PrintMessageCommentLines(self, message_type):
"""Print the description of this message."""
description = message_type.description or '%s message type.' % (
message_type.name)
width = self.__printer.CalculateWidth() - 3
for line in textwrap.wrap(description, width):
self.__printer('// %s', line)
PrintIndentedDescriptions(self.__printer, message_type.enum_types,
'Enums', prefix='// ')
PrintIndentedDescriptions(self.__printer, message_type.message_types,
'Messages', prefix='// ')
PrintIndentedDescriptions(self.__printer, message_type.fields,
'Fields', prefix='// ')
def __PrintFieldDescription(self, description):
for line in textwrap.wrap(description,
self.__printer.CalculateWidth() - 3):
self.__printer('// %s', line)
def __PrintFields(self, fields):
for extended_field in fields:
field = extended_field.field_descriptor
field_type = messages.Field.lookup_field_type_by_variant(
field.variant)
self.__printer()
self.__PrintFieldDescription(extended_field.description)
label = str(field.label).lower()
if field_type in (messages.EnumField, messages.MessageField):
proto_type = field.type_name
else:
proto_type = str(field.variant).lower()
default_statement = ''
if field.default_value:
if field_type in [messages.BytesField, messages.StringField]:
default_value = '"%s"' % field.default_value
elif field_type is messages.BooleanField:
default_value = str(field.default_value).lower()
else:
default_value = str(field.default_value)
default_statement = ' [default = %s]' % default_value
self.__printer(
'%s %s %s = %d%s;',
label, proto_type, field.name, field.number, default_statement)
def PrintMessage(self, message_type):
self.__printer()
self.__PrintMessageCommentLines(message_type)
if _EmptyMessage(message_type):
self.__printer('message %s {}', message_type.name)
return
self.__printer('message %s {', message_type.name)
with self.__printer.Indent():
_PrintEnums(self, message_type.enum_types)
_PrintMessages(self, message_type.message_types)
self.__PrintFields(message_type.fields)
self.__printer('}')
def PrintCustomJsonMapping(self, mapping_lines):
raise NotImplementedError(
'Custom JSON encoding not supported for proto2')
class _ProtoRpcPrinter(ProtoPrinter):
"""Printer for ProtoRPC definitions."""
def __init__(self, printer):
self.__printer = printer
def __PrintClassSeparator(self):
self.__printer()
if not self.__printer.indent:
self.__printer()
def __PrintEnumDocstringLines(self, enum_type):
description = enum_type.description or '%s enum type.' % enum_type.name
for line in textwrap.wrap('r"""%s' % description,
self.__printer.CalculateWidth()):
self.__printer(line)
PrintIndentedDescriptions(self.__printer, enum_type.values, 'Values')
self.__printer('"""')
def PrintEnum(self, enum_type):
self.__printer('class %s(_messages.Enum):', enum_type.name)
with self.__printer.Indent():
self.__PrintEnumDocstringLines(enum_type)
enum_values = sorted(
enum_type.values, key=operator.attrgetter('number'))
for enum_value in enum_values:
self.__printer('%s = %s', enum_value.name, enum_value.number)
if not enum_type.values:
self.__printer('pass')
self.__PrintClassSeparator()
def __PrintAdditionalImports(self, imports):
"""Print additional imports needed for protorpc."""
google_imports = [x for x in imports if 'google' in x]
other_imports = [x for x in imports if 'google' not in x]
if other_imports:
for import_ in sorted(other_imports):
self.__printer(import_)
self.__printer()
# Note: If we ever were going to add imports from this package, we'd
# need to sort those out and put them at the end.
if google_imports:
for import_ in sorted(google_imports):
self.__printer(import_)
self.__printer()
def PrintPreamble(self, package, version, file_descriptor):
self.__printer('"""Generated message classes for %s version %s.',
package, version)
self.__printer()
for line in textwrap.wrap(file_descriptor.description, 78):
self.__printer(line)
self.__printer('"""')
self.__printer('# NOTE: This file is autogenerated and should not be '
'edited by hand.')
self.__printer()
self.__printer('from __future__ import absolute_import')
self.__printer()
self.__PrintAdditionalImports(file_descriptor.additional_imports)
self.__printer()
self.__printer("package = '%s'", file_descriptor.package)
self.__printer()
self.__printer()
def __PrintMessageDocstringLines(self, message_type):
"""Print the docstring for this message."""
description = message_type.description or '%s message type.' % (
message_type.name)
short_description = (
_EmptyMessage(message_type) and
len(description) < (self.__printer.CalculateWidth() - 6))
with self.__printer.CommentContext():
if short_description:
# Note that we use explicit string interpolation here since
# we're in comment context.
self.__printer('r"""%s"""' % description)
return
for line in textwrap.wrap('r"""%s' % description,
self.__printer.CalculateWidth()):
self.__printer(line)
PrintIndentedDescriptions(self.__printer, message_type.enum_types,
'Enums')
PrintIndentedDescriptions(
self.__printer, message_type.message_types, 'Messages')
PrintIndentedDescriptions(
self.__printer, message_type.fields, 'Fields')
self.__printer('"""')
self.__printer()
def PrintMessage(self, message_type):
if message_type.alias_for:
self.__printer(
'%s = %s', message_type.name, message_type.alias_for)
self.__PrintClassSeparator()
return
for decorator in message_type.decorators:
self.__printer('@%s', decorator)
self.__printer('class %s(_messages.Message):', message_type.name)
with self.__printer.Indent():
self.__PrintMessageDocstringLines(message_type)
_PrintEnums(self, message_type.enum_types)
_PrintMessages(self, message_type.message_types)
_PrintFields(message_type.fields, self.__printer)
self.__PrintClassSeparator()
def PrintCustomJsonMapping(self, mapping):
self.__printer(mapping)
def _PrintEnums(proto_printer, enum_types):
"""Print all enums to the given proto_printer."""
enum_types = sorted(enum_types, key=operator.attrgetter('name'))
for enum_type in enum_types:
proto_printer.PrintEnum(enum_type)
def _PrintMessages(proto_printer, message_list):
message_list = sorted(message_list, key=operator.attrgetter('name'))
for message_type in message_list:
proto_printer.PrintMessage(message_type)
_MESSAGE_FIELD_MAP = {
message_types.DateTimeMessage.definition_name(): (
message_types.DateTimeField),
}
def _PrintFields(fields, printer):
for extended_field in fields:
field = extended_field.field_descriptor
printed_field_info = {
'name': field.name,
'module': '_messages',
'type_name': '',
'type_format': '',
'number': field.number,
'label_format': '',
'variant_format': '',
'default_format': '',
}
message_field = _MESSAGE_FIELD_MAP.get(field.type_name)
if message_field:
printed_field_info['module'] = '_message_types'
field_type = message_field
elif field.type_name == 'extra_types.DateField':
printed_field_info['module'] = 'extra_types'
field_type = extra_types.DateField
else:
field_type = messages.Field.lookup_field_type_by_variant(
field.variant)
if field_type in (messages.EnumField, messages.MessageField):
printed_field_info['type_format'] = "'%s', " % field.type_name
if field.label == protorpc_descriptor.FieldDescriptor.Label.REQUIRED:
printed_field_info['label_format'] = ', required=True'
elif field.label == protorpc_descriptor.FieldDescriptor.Label.REPEATED:
printed_field_info['label_format'] = ', repeated=True'
if field_type.DEFAULT_VARIANT != field.variant:
printed_field_info['variant_format'] = (
', variant=_messages.Variant.%s' % field.variant)
if field.default_value:
if field_type in [messages.BytesField, messages.StringField]:
default_value = repr(field.default_value)
elif field_type is messages.EnumField:
try:
default_value = str(int(field.default_value))
except ValueError:
default_value = repr(field.default_value)
else:
default_value = field.default_value
printed_field_info[
'default_format'] = ', default=%s' % (default_value,)
printed_field_info['type_name'] = field_type.__name__
args = ''.join('%%(%s)s' % field for field in (
'type_format',
'number',
'label_format',
'variant_format',
'default_format'))
format_str = '%%(name)s = %%(module)s.%%(type_name)s(%s)' % args
printer(format_str % printed_field_info)
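# Hedged usage sketch (not part of the original module): building a tiny
# extended descriptor by hand. Field names, numbers and descriptions are
# invented; real descriptors are normally produced by the apitools generator,
# and WritePythonFile/WriteMessagesFile additionally expect a printer object
# providing Indent(), CommentContext() and CalculateWidth().
def _ExampleExtendedMessage():
    count_field = ExtendedFieldDescriptor(
        name='count',
        description='Number of items returned.',
        field_descriptor=protorpc_descriptor.FieldDescriptor(
            name='count',
            number=1,
            variant=messages.Variant.INT32))
    return ExtendedMessageDescriptor(
        name='ExampleResponse',
        full_name='ExampleResponse',
        description='A response message with a single count field.',
        fields=[count_field])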
| |
from __future__ import unicode_literals
import pickle
import time
from datetime import datetime
from django.conf import settings
from django.template import Context, engines
from django.template.response import (
ContentNotRenderedError, SimpleTemplateResponse, TemplateResponse,
)
from django.test import (
RequestFactory, SimpleTestCase, ignore_warnings, override_settings,
)
from django.test.utils import require_jinja2
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import TEMPLATE_DIR
def test_processor(request):
return {'processors': 'yes'}
test_processor_name = 'template_tests.test_response.test_processor'
# A test middleware that installs a temporary URLConf
class CustomURLConfMiddleware(object):
def process_request(self, request):
request.urlconf = 'template_tests.alternate_urls'
class SimpleTemplateResponseTest(SimpleTestCase):
def _response(self, template='foo', *args, **kwargs):
template = engines['django'].from_string(template)
return SimpleTemplateResponse(template, *args, **kwargs)
def test_template_resolving(self):
response = SimpleTemplateResponse('first/test.html')
response.render()
self.assertEqual(response.content, b'First template\n')
templates = ['foo.html', 'second/test.html', 'first/test.html']
response = SimpleTemplateResponse(templates)
response.render()
self.assertEqual(response.content, b'Second template\n')
response = self._response()
response.render()
self.assertEqual(response.content, b'foo')
def test_explicit_baking(self):
# explicit baking
response = self._response()
self.assertFalse(response.is_rendered)
response.render()
self.assertTrue(response.is_rendered)
def test_render(self):
# response is not re-rendered without the render call
response = self._response().render()
self.assertEqual(response.content, b'foo')
# rebaking doesn't change the rendered content
template = engines['django'].from_string('bar{{ baz }}')
response.template_name = template
response.render()
self.assertEqual(response.content, b'foo')
# but rendered content can be overridden by manually
# setting content
response.content = 'bar'
self.assertEqual(response.content, b'bar')
def test_iteration_unrendered(self):
# unrendered response raises an exception on iteration
response = self._response()
self.assertFalse(response.is_rendered)
def iteration():
for x in response:
pass
self.assertRaises(ContentNotRenderedError, iteration)
self.assertFalse(response.is_rendered)
def test_iteration_rendered(self):
# iteration works for rendered responses
response = self._response().render()
res = [x for x in response]
self.assertEqual(res, [b'foo'])
def test_content_access_unrendered(self):
# unrendered response raises an exception when content is accessed
response = self._response()
self.assertFalse(response.is_rendered)
self.assertRaises(ContentNotRenderedError, lambda: response.content)
self.assertFalse(response.is_rendered)
def test_content_access_rendered(self):
# rendered response content can be accessed
response = self._response().render()
self.assertEqual(response.content, b'foo')
def test_set_content(self):
# content can be overridden
response = self._response()
self.assertFalse(response.is_rendered)
response.content = 'spam'
self.assertTrue(response.is_rendered)
self.assertEqual(response.content, b'spam')
response.content = 'baz'
self.assertEqual(response.content, b'baz')
def test_dict_context(self):
response = self._response('{{ foo }}{{ processors }}',
{'foo': 'bar'})
self.assertEqual(response.context_data, {'foo': 'bar'})
response.render()
self.assertEqual(response.content, b'bar')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_context_instance(self):
response = self._response('{{ foo }}{{ processors }}',
Context({'foo': 'bar'}))
self.assertEqual(response.context_data.__class__, Context)
response.render()
self.assertEqual(response.content, b'bar')
def test_kwargs(self):
response = self._response(content_type='application/json', status=504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
def test_args(self):
response = SimpleTemplateResponse('', {}, 'application/json', 504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
@require_jinja2
def test_using(self):
response = SimpleTemplateResponse('template_tests/using.html').render()
self.assertEqual(response.content, b'DTL\n')
response = SimpleTemplateResponse('template_tests/using.html', using='django').render()
self.assertEqual(response.content, b'DTL\n')
response = SimpleTemplateResponse('template_tests/using.html', using='jinja2').render()
self.assertEqual(response.content, b'Jinja2\n')
def test_post_callbacks(self):
"Rendering a template response triggers the post-render callbacks"
post = []
def post1(obj):
post.append('post1')
def post2(obj):
post.append('post2')
response = SimpleTemplateResponse('first/test.html', {})
response.add_post_render_callback(post1)
response.add_post_render_callback(post2)
# When the content is rendered, all the callbacks are invoked, too.
response.render()
self.assertEqual(response.content, b'First template\n')
self.assertEqual(post, ['post1', 'post2'])
def test_pickling(self):
# Create a template response. The context is
# known to be unpickleable (e.g., a function).
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
# But if we render the response, we can pickle it.
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.content, response.content)
self.assertEqual(unpickled_response['content-type'], response['content-type'])
self.assertEqual(unpickled_response.status_code, response.status_code)
# ...and the unpickled response doesn't have the
# template-related attributes, so it can't be re-rendered
template_attrs = ('template_name', 'context_data', '_post_render_callbacks')
for attr in template_attrs:
self.assertFalse(hasattr(unpickled_response, attr))
# ...and requesting any of those attributes raises an exception
for attr in template_attrs:
with self.assertRaises(AttributeError):
getattr(unpickled_response, attr)
def test_repickling(self):
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
pickle.dumps(unpickled_response)
def test_pickling_cookie(self):
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
response.cookies['key'] = 'value'
response.render()
pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.cookies['key'].value, 'value')
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'OPTIONS': {
'context_processors': [test_processor_name],
},
}])
class TemplateResponseTest(SimpleTestCase):
def setUp(self):
self.factory = RequestFactory()
def _response(self, template='foo', *args, **kwargs):
self._request = self.factory.get('/')
template = engines['django'].from_string(template)
return TemplateResponse(self._request, template, *args, **kwargs)
def test_render(self):
response = self._response('{{ foo }}{{ processors }}').render()
self.assertEqual(response.content, b'yes')
def test_render_with_requestcontext(self):
response = self._response('{{ foo }}{{ processors }}',
{'foo': 'bar'}).render()
self.assertEqual(response.content, b'baryes')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_render_with_context(self):
response = self._response('{{ foo }}{{ processors }}',
Context({'foo': 'bar'})).render()
self.assertEqual(response.content, b'bar')
def test_context_processor_priority(self):
# context processors should be overridden by passed-in context
response = self._response('{{ foo }}{{ processors }}',
{'processors': 'no'}).render()
self.assertEqual(response.content, b'no')
def test_kwargs(self):
response = self._response(content_type='application/json',
status=504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
def test_args(self):
response = TemplateResponse(self.factory.get('/'), '', {},
'application/json', 504)
self.assertEqual(response['content-type'], 'application/json')
self.assertEqual(response.status_code, 504)
@require_jinja2
def test_using(self):
request = self.factory.get('/')
response = TemplateResponse(request, 'template_tests/using.html').render()
self.assertEqual(response.content, b'DTL\n')
response = TemplateResponse(request, 'template_tests/using.html', using='django').render()
self.assertEqual(response.content, b'DTL\n')
response = TemplateResponse(request, 'template_tests/using.html', using='jinja2').render()
self.assertEqual(response.content, b'Jinja2\n')
@ignore_warnings(category=RemovedInDjango110Warning)
def test_custom_app(self):
self._response('{{ foo }}', current_app="foobar")
self.assertEqual(self._request.current_app, 'foobar')
def test_pickling(self):
# Create a template response. The context is
# known to be unpickleable (e.g., a function).
response = TemplateResponse(self.factory.get('/'),
'first/test.html', {
'value': 123,
'fn': datetime.now,
}
)
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
# But if we render the response, we can pickle it.
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.content, response.content)
self.assertEqual(unpickled_response['content-type'], response['content-type'])
self.assertEqual(unpickled_response.status_code, response.status_code)
# ...and the unpickled response doesn't have the
# template-related attributes, so it can't be re-rendered
template_attrs = ('template_name', 'context_data',
'_post_render_callbacks', '_request', '_current_app')
for attr in template_attrs:
self.assertFalse(hasattr(unpickled_response, attr))
# ...and requesting any of those attributes raises an exception
for attr in template_attrs:
with self.assertRaises(AttributeError):
getattr(unpickled_response, attr)
def test_repickling(self):
response = SimpleTemplateResponse('first/test.html', {
'value': 123,
'fn': datetime.now,
})
self.assertRaises(ContentNotRenderedError,
pickle.dumps, response)
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
pickle.dumps(unpickled_response)
@override_settings(
MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
'template_tests.test_response.CustomURLConfMiddleware'
],
ROOT_URLCONF='template_tests.urls',
)
class CustomURLConfTest(SimpleTestCase):
def test_custom_urlconf(self):
response = self.client.get('/template_response_view/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This is where you can find the snark: /snark/')
@override_settings(
CACHE_MIDDLEWARE_SECONDS=2.0,
MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
'django.middleware.cache.FetchFromCacheMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
],
ROOT_URLCONF='template_tests.alternate_urls',
)
class CacheMiddlewareTest(SimpleTestCase):
def test_middleware_caching(self):
response = self.client.get('/template_response_view/')
self.assertEqual(response.status_code, 200)
time.sleep(1.0)
response2 = self.client.get('/template_response_view/')
self.assertEqual(response2.status_code, 200)
self.assertEqual(response.content, response2.content)
time.sleep(2.0)
# Let the cache expire and test again
response2 = self.client.get('/template_response_view/')
self.assertEqual(response2.status_code, 200)
self.assertNotEqual(response.content, response2.content)
| |
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <conradlee@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Martino Sorbaro <martino.sorbaro@ed.ac.uk>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=1):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For large
datasets, it's wise to set that parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance or None (default)
The generator used to randomly select the samples from input points
for bandwidth estimation. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
n_neighbors = int(X.shape[0] * quantile)
if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0
n_neighbors = 1
nbrs = NearestNeighbors(n_neighbors=n_neighbors,
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
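# Hedged usage sketch (illustration only, not part of scikit-learn): estimating
# a bandwidth on small synthetic data; the shapes, quantile and subsample size
# are arbitrary example values.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 2)
    # Subsampling keeps the pairwise-distance cost of the estimate manageable.
    return estimate_bandwidth(X, quantile=0.2, n_samples=100, random_state=0)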
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
        # If converged or at max_iter, add the cluster
if (np.linalg.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
        Maximum number of iterations per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
For an example, see :ref:`examples/cluster/plot_mean_shift.py
<sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
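# Hedged usage sketch (illustration only): the functional API on synthetic data
# with two well-separated blobs; the bandwidth and data values are arbitrary.
def _example_mean_shift():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
    # bin_seeding=True reduces the number of seeds by binning points first.
    cluster_centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True)
    return cluster_centers, labels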
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
    labels_ : array, shape [n_samples]
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
y : Ignored
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
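# Hedged usage sketch (illustration only, not part of scikit-learn): the
# estimator API on the same kind of synthetic data; the bandwidth is left to
# be estimated automatically via estimate_bandwidth.
def _example_mean_shift_estimator():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 5])
    ms = MeanShift(bin_seeding=True).fit(X)
    # predict() assigns new points to the nearest fitted cluster center.
    new_labels = ms.predict(rng.randn(5, 2) + 5)
    return ms.cluster_centers_, ms.labels_, new_labels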
| |
# maxdb/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MaxDB database.
.. note::
The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
Overview
--------
The ``maxdb`` dialect is **experimental** and has only been tested on 7.6.03.007
and 7.6.00.037. Of these, **only 7.6.03.007 will work** with SQLAlchemy's ORM.
The earlier version has severe ``LEFT JOIN`` limitations and will return
incorrect results from even very simple ORM queries.
Only the native Python DB-API is currently supported. ODBC driver support
is a future enhancement.
Connecting
----------
The username is case-sensitive. If you usually connect to the
database with sqlcli and other tools in lower case, you likely need to
use upper case for DB-API.
Implementation Notes
--------------------
With the 7.6.00.37 driver and Python 2.5, it seems that all DB-API
generated exceptions are broken and can cause Python to crash.
For 'somecol.in_([])' to work, the IN operator's generation must be changed
to cast 'NULL' to a numeric, i.e. NUM(NULL). The DB-API doesn't accept a
bind parameter there, so that particular generation must inline the NULL value,
which depends on [ticket:807].
The DB-API is very picky about where bind params may be used in queries.
Bind params for some functions (e.g. MOD) need type information supplied.
The dialect does not yet do this automatically.
Max will occasionally throw up 'bad sql, compile again' exceptions for
perfectly valid SQL. The dialect does not currently handle these; more
research is needed.
MaxDB 7.5 and Sap DB <= 7.4 reportedly do not support schemas. A very
slightly different version of this dialect would be required to support
those versions, and can easily be added if there is demand. Some other
required components such as a Max-aware 'old oracle style' join compiler
(thetas with (+) outer indicators) are already done and available for
integration- email the devel list if you're interested in working on
this.
Versions tested: 7.6.03.07 and 7.6.00.37, native Python DB-API
* MaxDB has severe limitations on OUTER JOINs, which are essential to ORM
eager loading. And rather than raise an error if a SELECT can't be serviced,
the database simply returns incorrect results.
* Version 7.6.03.07 seems to JOIN properly, however the docs do not show the
OUTER restrictions being lifted (as of this writing), and no changelog is
available to confirm either. If you are using a different server version and
your tasks require the ORM or any semi-advanced SQL through the SQL layer,
running the SQLAlchemy test suite against your database is HIGHLY
recommended before you begin.
* Version 7.6.00.37 is LHS/RHS sensitive in `FROM lhs LEFT OUTER JOIN rhs ON
lhs.col=rhs.col` vs `rhs.col=lhs.col`!
* Version 7.6.00.37 is confused by `SELECT DISTINCT col as alias FROM t ORDER
BY col` - these aliased, DISTINCT, ordered queries need to be re-written to
order by the alias name.
* Version 7.6.x supports creating a SAVEPOINT but not its RELEASE.
* MaxDB supports autoincrement-style columns (DEFAULT SERIAL) and independent
sequences. When including a DEFAULT SERIAL column in an insert, 0 needs to
be inserted rather than NULL to generate a value.
* MaxDB supports ANSI and "old Oracle style" theta joins with (+) outer join
indicators.
* The SQLAlchemy dialect is schema-aware and probably won't function correctly
  on older, schema-less server versions (pre-7.6?). Support for schema-less
  server versions could be added if there's call.
* ORDER BY is not supported in subqueries. LIMIT is not supported in
subqueries. In 7.6.00.37, TOP does work in subqueries, but without limit not
so useful. OFFSET does not work in 7.6 despite being in the docs. Row number
tricks in WHERE via ROWNO may be possible but it only seems to allow
less-than comparison!
* Version 7.6.03.07 can't LIMIT if a derived table is in FROM: `SELECT * FROM
(SELECT * FROM a) LIMIT 2`
* MaxDB does not support SQL's CAST and can only usefully cast two types.
There isn't much implicit type conversion, so be precise when creating
`PassiveDefaults` in DDL generation: `'3'` and `3` aren't the same.
sapdb.dbapi
^^^^^^^^^^^
* As of 2007-10-22 the Python 2.4 and 2.5 compatible versions of the DB-API
are no longer available. A forum posting at SAP states that the Python
driver will be available again "in the future". The last release from MySQL
AB works if you can find it.
* sequence.NEXTVAL skips every other value!
* No rowcount for executemany()
* If an INSERT into a table with a DEFAULT SERIAL column inserts the results
of a function `INSERT INTO t VALUES (LENGTH('foo'))`, the cursor won't have
the serial id. It needs to be manually yanked from tablename.CURRVAL.
* Super-duper picky about where bind params can be placed. Not smart about
converting Python types for some functions, such as `MOD(5, ?)`.
* LONG (text, binary) values in result sets are read-once. The dialect uses a
caching RowProxy when these types are present.
* Connection objects seem like they want to be either `close()`d or garbage
collected, but not both. There's a warning issued but it seems harmless.
"""
import datetime, itertools, re
from sqlalchemy import exc, schema, sql, util, processors
from sqlalchemy.sql import operators as sql_operators, expression as sql_expr
from sqlalchemy.sql import compiler, visitors
from sqlalchemy.engine import base as engine_base, default, reflection
from sqlalchemy import types as sqltypes
class _StringType(sqltypes.String):
_type = None
def __init__(self, length=None, encoding=None, **kw):
super(_StringType, self).__init__(length=length, **kw)
self.encoding = encoding
def bind_processor(self, dialect):
if self.encoding == 'unicode':
return None
else:
def process(value):
if isinstance(value, unicode):
return value.encode(dialect.encoding)
else:
return value
return process
def result_processor(self, dialect, coltype):
#XXX: this code is probably very slow and one should try (if at all
# possible) to determine the correct code path on a per-connection
# basis (ie, here in result_processor, instead of inside the processor
# function itself) and probably also use a few generic
# processors, or possibly per query (though there is no mechanism
# for that yet).
def process(value):
while True:
if value is None:
return None
elif isinstance(value, unicode):
return value
elif isinstance(value, str):
if self.convert_unicode or dialect.convert_unicode:
return value.decode(dialect.encoding)
else:
return value
elif hasattr(value, 'read'):
# some sort of LONG, snarf and retry
value = value.read(value.remainingLength())
continue
else:
# unexpected type, return as-is
return value
return process
class MaxString(_StringType):
_type = 'VARCHAR'
class MaxUnicode(_StringType):
_type = 'VARCHAR'
def __init__(self, length=None, **kw):
kw['encoding'] = 'unicode'
super(MaxUnicode, self).__init__(length=length, **kw)
class MaxChar(_StringType):
_type = 'CHAR'
class MaxText(_StringType):
_type = 'LONG'
def __init__(self, length=None, **kw):
super(MaxText, self).__init__(length, **kw)
def get_col_spec(self):
spec = 'LONG'
if self.encoding is not None:
spec = ' '.join((spec, self.encoding))
elif self.convert_unicode:
spec = ' '.join((spec, 'UNICODE'))
return spec
class MaxNumeric(sqltypes.Numeric):
"""The FIXED (also NUMERIC, DECIMAL) data type."""
def __init__(self, precision=None, scale=None, **kw):
kw.setdefault('asdecimal', True)
super(MaxNumeric, self).__init__(scale=scale, precision=precision,
**kw)
def bind_processor(self, dialect):
return None
class MaxTimestamp(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, basestring):
return value
elif dialect.datetimeformat == 'internal':
ms = getattr(value, 'microsecond', 0)
return value.strftime("%Y%m%d%H%M%S" + ("%06u" % ms))
elif dialect.datetimeformat == 'iso':
ms = getattr(value, 'microsecond', 0)
return value.strftime("%Y-%m-%d %H:%M:%S." + ("%06u" % ms))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." % (
dialect.datetimeformat,))
return process
def result_processor(self, dialect, coltype):
if dialect.datetimeformat == 'internal':
def process(value):
if value is None:
return None
else:
return datetime.datetime(
*[int(v)
for v in (value[0:4], value[4:6], value[6:8],
value[8:10], value[10:12], value[12:14],
value[14:])])
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.datetime(
*[int(v)
for v in (value[0:4], value[5:7], value[8:10],
value[11:13], value[14:16], value[17:19],
value[20:])])
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
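# Illustrative example (not part of the original dialect): for
# datetime.datetime(2007, 10, 22, 13, 45, 7, 500) the bind_processor above
# emits '20071022134507000500' when datetimeformat == 'internal' and
# '2007-10-22 13:45:07.000500' when datetimeformat == 'iso'; the
# result_processor reverses the corresponding slicing.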
class MaxDate(sqltypes.Date):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, basestring):
return value
elif dialect.datetimeformat == 'internal':
return value.strftime("%Y%m%d")
elif dialect.datetimeformat == 'iso':
return value.strftime("%Y-%m-%d")
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." % (
dialect.datetimeformat,))
return process
def result_processor(self, dialect, coltype):
if dialect.datetimeformat == 'internal':
def process(value):
if value is None:
return None
else:
return datetime.date(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.date(int(value[0:4]), int(value[5:7]),
int(value[8:10]))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
class MaxTime(sqltypes.Time):
def bind_processor(self, dialect):
def process(value):
if value is None:
return None
elif isinstance(value, basestring):
return value
elif dialect.datetimeformat == 'internal':
return value.strftime("%H%M%S")
elif dialect.datetimeformat == 'iso':
return value.strftime("%H-%M-%S")
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." % (
dialect.datetimeformat,))
return process
def result_processor(self, dialect, coltype):
if dialect.datetimeformat == 'internal':
def process(value):
if value is None:
return None
else:
return datetime.time(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.time(int(value[0:4]), int(value[5:7]),
int(value[8:10]))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
class MaxBlob(sqltypes.LargeBinary):
def bind_processor(self, dialect):
return processors.to_str
def result_processor(self, dialect, coltype):
def process(value):
if value is None:
return None
else:
return value.read(value.remainingLength())
return process
class MaxDBTypeCompiler(compiler.GenericTypeCompiler):
def _string_spec(self, string_spec, type_):
if type_.length is None:
spec = 'LONG'
else:
spec = '%s(%s)' % (string_spec, type_.length)
if getattr(type_, 'encoding'):
spec = ' '.join([spec, getattr(type_, 'encoding').upper()])
return spec
def visit_text(self, type_):
spec = 'LONG'
if getattr(type_, 'encoding', None):
spec = ' '.join((spec, type_.encoding))
elif type_.convert_unicode:
spec = ' '.join((spec, 'UNICODE'))
return spec
def visit_char(self, type_):
return self._string_spec("CHAR", type_)
def visit_string(self, type_):
return self._string_spec("VARCHAR", type_)
def visit_large_binary(self, type_):
return "LONG BYTE"
def visit_numeric(self, type_):
if type_.scale and type_.precision:
return 'FIXED(%s, %s)' % (type_.precision, type_.scale)
elif type_.precision:
return 'FIXED(%s)' % type_.precision
else:
return 'INTEGER'
def visit_BOOLEAN(self, type_):
return "BOOLEAN"
colspecs = {
sqltypes.Numeric: MaxNumeric,
sqltypes.DateTime: MaxTimestamp,
sqltypes.Date: MaxDate,
sqltypes.Time: MaxTime,
sqltypes.String: MaxString,
    sqltypes.Unicode: MaxUnicode,
sqltypes.LargeBinary: MaxBlob,
sqltypes.Text: MaxText,
sqltypes.CHAR: MaxChar,
sqltypes.TIMESTAMP: MaxTimestamp,
sqltypes.BLOB: MaxBlob,
}
ischema_names = {
'boolean': sqltypes.BOOLEAN,
'char': sqltypes.CHAR,
'character': sqltypes.CHAR,
'date': sqltypes.DATE,
'fixed': sqltypes.Numeric,
'float': sqltypes.FLOAT,
'int': sqltypes.INT,
'integer': sqltypes.INT,
'long binary': sqltypes.BLOB,
'long unicode': sqltypes.Text,
    'long': sqltypes.Text,
'smallint': sqltypes.SmallInteger,
'time': sqltypes.Time,
'timestamp': sqltypes.TIMESTAMP,
'varchar': sqltypes.VARCHAR,
}
# TODO: migrate this to sapdb.py
class MaxDBExecutionContext(default.DefaultExecutionContext):
def post_exec(self):
# DB-API bug: if there were any functions as values,
# then do another select and pull CURRVAL from the
# autoincrement column's implicit sequence... ugh
if self.compiled.isinsert and not self.executemany:
table = self.compiled.statement.table
index, serial_col = _autoserial_column(table)
if serial_col and (not self.compiled._safeserial or
not(self._last_inserted_ids) or
self._last_inserted_ids[index] in (None, 0)):
if table.schema:
sql = "SELECT %s.CURRVAL FROM DUAL" % (
self.compiled.preparer.format_table(table))
else:
sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % (
self.compiled.preparer.format_table(table))
rs = self.cursor.execute(sql)
id = rs.fetchone()[0]
if not self._last_inserted_ids:
# This shouldn't ever be > 1? Right?
self._last_inserted_ids = \
[None] * len(table.primary_key.columns)
self._last_inserted_ids[index] = id
super(MaxDBExecutionContext, self).post_exec()
def get_result_proxy(self):
if self.cursor.description is not None:
for column in self.cursor.description:
if column[1] in ('Long Binary', 'Long', 'Long Unicode'):
return MaxDBResultProxy(self)
return engine_base.ResultProxy(self)
@property
def rowcount(self):
if hasattr(self, '_rowcount'):
return self._rowcount
else:
return self.cursor.rowcount
def fire_sequence(self, seq):
if seq.optional:
return None
return self._execute_scalar("SELECT %s.NEXTVAL FROM DUAL" % (
self.dialect.identifier_preparer.format_sequence(seq)))
class MaxDBCachedColumnRow(engine_base.RowProxy):
"""A RowProxy that only runs result_processors once per column."""
def __init__(self, parent, row):
super(MaxDBCachedColumnRow, self).__init__(parent, row)
self.columns = {}
self._row = row
self._parent = parent
def _get_col(self, key):
if key not in self.columns:
self.columns[key] = self._parent._get_col(self._row, key)
return self.columns[key]
def __iter__(self):
for i in xrange(len(self._row)):
yield self._get_col(i)
def __repr__(self):
return repr(list(self))
def __eq__(self, other):
return ((other is self) or
(other == tuple([self._get_col(key)
for key in xrange(len(self._row))])))
def __getitem__(self, key):
if isinstance(key, slice):
indices = key.indices(len(self._row))
return tuple([self._get_col(i) for i in xrange(*indices)])
else:
return self._get_col(key)
def __getattr__(self, name):
try:
return self._get_col(name)
except KeyError:
raise AttributeError(name)
class MaxDBResultProxy(engine_base.ResultProxy):
_process_row = MaxDBCachedColumnRow
class MaxDBCompiler(compiler.SQLCompiler):
function_conversion = {
'CURRENT_DATE': 'DATE',
'CURRENT_TIME': 'TIME',
'CURRENT_TIMESTAMP': 'TIMESTAMP',
}
# These functions must be written without parens when called with no
# parameters. e.g. 'SELECT DATE FROM DUAL' not 'SELECT DATE() FROM DUAL'
bare_functions = set([
'CURRENT_SCHEMA', 'DATE', 'FALSE', 'SYSDBA', 'TIME', 'TIMESTAMP',
'TIMEZONE', 'TRANSACTION', 'TRUE', 'USER', 'UID', 'USERGROUP',
'UTCDATE', 'UTCDIFF'])
def visit_mod(self, binary, **kw):
return "mod(%s, %s)" % \
(self.process(binary.left), self.process(binary.right))
def default_from(self):
return ' FROM DUAL'
def for_update_clause(self, select):
clause = select.for_update
if clause is True:
return " WITH LOCK EXCLUSIVE"
elif clause is None:
return ""
elif clause == "read":
return " WITH LOCK"
elif clause == "ignore":
return " WITH LOCK (IGNORE) EXCLUSIVE"
elif clause == "nowait":
return " WITH LOCK (NOWAIT) EXCLUSIVE"
elif isinstance(clause, basestring):
return " WITH LOCK %s" % clause.upper()
elif not clause:
return ""
else:
return " WITH LOCK EXCLUSIVE"
def function_argspec(self, fn, **kw):
if fn.name.upper() in self.bare_functions:
return ""
elif len(fn.clauses) > 0:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def visit_function(self, fn, **kw):
transform = self.function_conversion.get(fn.name.upper(), None)
if transform:
fn = fn._clone()
fn.name = transform
return super(MaxDBCompiler, self).visit_function(fn, **kw)
def visit_cast(self, cast, **kwargs):
# MaxDB only supports casts * to NUMERIC, * to VARCHAR or
# date/time to VARCHAR. Casts of LONGs will fail.
if isinstance(cast.type, (sqltypes.Integer, sqltypes.Numeric)):
return "NUM(%s)" % self.process(cast.clause)
elif isinstance(cast.type, sqltypes.String):
return "CHR(%s)" % self.process(cast.clause)
else:
return self.process(cast.clause)
def visit_sequence(self, sequence):
if sequence.optional:
return None
else:
return (
self.dialect.identifier_preparer.format_sequence(sequence) +
".NEXTVAL")
class ColumnSnagger(visitors.ClauseVisitor):
def __init__(self):
self.count = 0
self.column = None
def visit_column(self, column):
self.column = column
self.count += 1
def _find_labeled_columns(self, columns, use_labels=False):
labels = {}
for column in columns:
if isinstance(column, basestring):
continue
snagger = self.ColumnSnagger()
snagger.traverse(column)
if snagger.count == 1:
if isinstance(column, sql_expr._Label):
labels[unicode(snagger.column)] = column.name
elif use_labels:
labels[unicode(snagger.column)] = column._label
return labels
def order_by_clause(self, select, **kw):
order_by = self.process(select._order_by_clause, **kw)
# ORDER BY clauses in DISTINCT queries must reference aliased
# inner columns by alias name, not true column name.
if order_by and getattr(select, '_distinct', False):
labels = self._find_labeled_columns(select.inner_columns,
select.use_labels)
if labels:
for needs_alias in labels.keys():
r = re.compile(r'(^| )(%s)(,| |$)' %
re.escape(needs_alias))
order_by = r.sub((r'\1%s\3' % labels[needs_alias]),
order_by)
# No ORDER BY in subqueries.
if order_by:
if self.is_subquery():
# It's safe to simply drop the ORDER BY if there is no
# LIMIT. Right? Other dialects seem to get away with
# dropping order.
if select._limit:
raise exc.CompileError(
"MaxDB does not support ORDER BY in subqueries")
else:
return ""
return " ORDER BY " + order_by
else:
return ""
def get_select_precolumns(self, select):
# Convert a subquery's LIMIT to TOP
sql = select._distinct and 'DISTINCT ' or ''
if self.is_subquery() and select._limit:
if select._offset:
raise exc.InvalidRequestError(
'MaxDB does not support LIMIT with an offset.')
sql += 'TOP %s ' % select._limit
return sql
def limit_clause(self, select):
# The docs say offsets are supported with LIMIT. But they're not.
# TODO: maybe emulate by adding a ROWNO/ROWNUM predicate?
# TODO: does MaxDB support bind params for LIMIT / TOP ?
if self.is_subquery():
# sub queries need TOP
return ''
elif select._offset:
raise exc.InvalidRequestError(
'MaxDB does not support LIMIT with an offset.')
else:
return ' \n LIMIT %s' % (select._limit,)
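    # Hedged note (illustrative only): for a top-level SELECT with a limit of
    # 10, limit_clause() above appends ' \n LIMIT 10'; inside a subquery the
    # limit is instead rendered as a 'TOP 10 ' prefix by
    # get_select_precolumns(), and combining a limit with an offset raises
    # InvalidRequestError in both paths.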
def visit_insert(self, insert):
self.isinsert = True
self._safeserial = True
colparams = self._get_colparams(insert)
for value in (insert.parameters or {}).itervalues():
if isinstance(value, sql_expr.Function):
self._safeserial = False
break
return ''.join(('INSERT INTO ',
self.preparer.format_table(insert.table),
' (',
', '.join([self.preparer.format_column(c[0])
for c in colparams]),
') VALUES (',
', '.join([c[1] for c in colparams]),
')'))
class MaxDBIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([
'abs', 'absolute', 'acos', 'adddate', 'addtime', 'all', 'alpha',
'alter', 'any', 'ascii', 'asin', 'atan', 'atan2', 'avg', 'binary',
'bit', 'boolean', 'byte', 'case', 'ceil', 'ceiling', 'char',
'character', 'check', 'chr', 'column', 'concat', 'constraint', 'cos',
'cosh', 'cot', 'count', 'cross', 'curdate', 'current', 'curtime',
'database', 'date', 'datediff', 'day', 'dayname', 'dayofmonth',
'dayofweek', 'dayofyear', 'dec', 'decimal', 'decode', 'default',
'degrees', 'delete', 'digits', 'distinct', 'double', 'except',
'exists', 'exp', 'expand', 'first', 'fixed', 'float', 'floor', 'for',
'from', 'full', 'get_objectname', 'get_schema', 'graphic', 'greatest',
'group', 'having', 'hex', 'hextoraw', 'hour', 'ifnull', 'ignore',
'index', 'initcap', 'inner', 'insert', 'int', 'integer', 'internal',
'intersect', 'into', 'join', 'key', 'last', 'lcase', 'least', 'left',
'length', 'lfill', 'list', 'ln', 'locate', 'log', 'log10', 'long',
'longfile', 'lower', 'lpad', 'ltrim', 'makedate', 'maketime',
'mapchar', 'max', 'mbcs', 'microsecond', 'min', 'minute', 'mod',
'month', 'monthname', 'natural', 'nchar', 'next', 'no', 'noround',
'not', 'now', 'null', 'num', 'numeric', 'object', 'of', 'on',
'order', 'packed', 'pi', 'power', 'prev', 'primary', 'radians',
'real', 'reject', 'relative', 'replace', 'rfill', 'right', 'round',
'rowid', 'rowno', 'rpad', 'rtrim', 'second', 'select', 'selupd',
'serial', 'set', 'show', 'sign', 'sin', 'sinh', 'smallint', 'some',
'soundex', 'space', 'sqrt', 'stamp', 'statistics', 'stddev',
'subdate', 'substr', 'substring', 'subtime', 'sum', 'sysdba',
'table', 'tan', 'tanh', 'time', 'timediff', 'timestamp', 'timezone',
'to', 'toidentifier', 'transaction', 'translate', 'trim', 'trunc',
'truncate', 'ucase', 'uid', 'unicode', 'union', 'update', 'upper',
'user', 'usergroup', 'using', 'utcdate', 'utcdiff', 'value', 'values',
'varchar', 'vargraphic', 'variance', 'week', 'weekofyear', 'when',
'where', 'with', 'year', 'zoned' ])
def _normalize_name(self, name):
if name is None:
return None
if name.isupper():
lc_name = name.lower()
if not self._requires_quotes(lc_name):
return lc_name
return name
def _denormalize_name(self, name):
if name is None:
return None
elif (name.islower() and
not self._requires_quotes(name)):
return name.upper()
else:
return name
def _maybe_quote_identifier(self, name):
if self._requires_quotes(name):
return self.quote_identifier(name)
else:
return name
class MaxDBDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
colspec = [self.preparer.format_column(column),
self.dialect.type_compiler.process(column.type)]
if not column.nullable:
colspec.append('NOT NULL')
default = column.default
default_str = self.get_column_default_string(column)
# No DDL default for columns specified with non-optional sequence-
# this defaulting behavior is entirely client-side. (And as a
# consequence, non-reflectable.)
if (default and isinstance(default, schema.Sequence) and
not default.optional):
pass
# Regular default
elif default_str is not None:
colspec.append('DEFAULT %s' % default_str)
# Assign DEFAULT SERIAL heuristically
elif column.primary_key and column.autoincrement:
# For SERIAL on a non-primary key member, use
# DefaultClause(text('SERIAL'))
try:
first = [c for c in column.table.primary_key.columns
if (c.autoincrement and
(isinstance(c.type, sqltypes.Integer) or
(isinstance(c.type, MaxNumeric) and
c.type.precision)) and
not c.foreign_keys)].pop(0)
if column is first:
colspec.append('DEFAULT SERIAL')
except IndexError:
pass
return ' '.join(colspec)
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
            if isinstance(column.server_default.arg, basestring):
                if isinstance(column.type, sqltypes.Integer):
                    return str(column.server_default.arg)
                else:
                    return "'%s'" % column.server_default.arg
            else:
                return unicode(self._compile(column.server_default.arg, None))
else:
return None
def visit_create_sequence(self, create):
"""Creates a SEQUENCE.
TODO: move to module doc?
start
With an integer value, set the START WITH option.
increment
An integer value to increment by. Default is the database default.
maxdb_minvalue
maxdb_maxvalue
With an integer value, sets the corresponding sequence option.
maxdb_no_minvalue
maxdb_no_maxvalue
Defaults to False. If true, sets the corresponding sequence option.
maxdb_cycle
Defaults to False. If true, sets the CYCLE option.
maxdb_cache
With an integer value, sets the CACHE option.
maxdb_no_cache
Defaults to False. If true, sets NOCACHE.
"""
sequence = create.element
if (not sequence.optional and
(not self.checkfirst or
not self.dialect.has_sequence(self.connection, sequence.name))):
ddl = ['CREATE SEQUENCE',
self.preparer.format_sequence(sequence)]
            if sequence.increment is not None:
                ddl.extend(('INCREMENT BY', str(sequence.increment)))
if sequence.start is not None:
ddl.extend(('START WITH', str(sequence.start)))
opts = dict([(pair[0][6:].lower(), pair[1])
for pair in sequence.kwargs.items()
if pair[0].startswith('maxdb_')])
if 'maxvalue' in opts:
ddl.extend(('MAXVALUE', str(opts['maxvalue'])))
elif opts.get('no_maxvalue', False):
ddl.append('NOMAXVALUE')
if 'minvalue' in opts:
ddl.extend(('MINVALUE', str(opts['minvalue'])))
elif opts.get('no_minvalue', False):
ddl.append('NOMINVALUE')
if opts.get('cycle', False):
ddl.append('CYCLE')
if 'cache' in opts:
ddl.extend(('CACHE', str(opts['cache'])))
elif opts.get('no_cache', False):
ddl.append('NOCACHE')
return ' '.join(ddl)
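# Hedged usage sketch (not part of the original dialect): the maxdb_* options
# documented in visit_create_sequence() are dialect keyword arguments on a
# SQLAlchemy Sequence. The sequence name and option values below are purely
# illustrative.
#
#   Sequence('widget_id_seq', start=1, increment=2,
#            maxdb_maxvalue=100000, maxdb_cycle=True, maxdb_cache=20)
#
# would be expected to emit DDL roughly like:
#
#   CREATE SEQUENCE widget_id_seq INCREMENT BY 2 START WITH 1
#       MAXVALUE 100000 CYCLE CACHE 20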
class MaxDBDialect(default.DefaultDialect):
name = 'maxdb'
supports_alter = True
supports_unicode_statements = True
max_identifier_length = 32
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
preparer = MaxDBIdentifierPreparer
statement_compiler = MaxDBCompiler
ddl_compiler = MaxDBDDLCompiler
execution_ctx_cls = MaxDBExecutionContext
ported_sqla_06 = False
colspecs = colspecs
ischema_names = ischema_names
# MaxDB-specific
datetimeformat = 'internal'
def __init__(self, _raise_known_sql_errors=False, **kw):
super(MaxDBDialect, self).__init__(**kw)
self._raise_known = _raise_known_sql_errors
if self.dbapi is None:
self.dbapi_type_map = {}
else:
self.dbapi_type_map = {
'Long Binary': MaxBlob(),
'Long byte_t': MaxBlob(),
'Long Unicode': MaxText(),
'Timestamp': MaxTimestamp(),
'Date': MaxDate(),
'Time': MaxTime(),
datetime.datetime: MaxTimestamp(),
datetime.date: MaxDate(),
datetime.time: MaxTime(),
}
def do_execute(self, cursor, statement, parameters, context=None):
res = cursor.execute(statement, parameters)
if isinstance(res, int) and context is not None:
context._rowcount = res
def do_release_savepoint(self, connection, name):
# Does MaxDB truly support RELEASE SAVEPOINT <id>? All my attempts
# produce "SUBTRANS COMMIT/ROLLBACK not allowed without SUBTRANS
# BEGIN SQLSTATE: I7065"
# Note that ROLLBACK TO works fine. In theory, a RELEASE should
# just free up some transactional resources early, before the overall
# COMMIT/ROLLBACK so omitting it should be relatively ok.
pass
def _get_default_schema_name(self, connection):
return self.identifier_preparer._normalize_name(
connection.execute(
'SELECT CURRENT_SCHEMA FROM DUAL').scalar())
def has_table(self, connection, table_name, schema=None):
denormalize = self.identifier_preparer._denormalize_name
bind = [denormalize(table_name)]
if schema is None:
sql = ("SELECT tablename FROM TABLES "
"WHERE TABLES.TABLENAME=? AND"
" TABLES.SCHEMANAME=CURRENT_SCHEMA ")
else:
sql = ("SELECT tablename FROM TABLES "
"WHERE TABLES.TABLENAME = ? AND"
" TABLES.SCHEMANAME=? ")
bind.append(denormalize(schema))
rp = connection.execute(sql, bind)
return bool(rp.first())
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
sql = (" SELECT TABLENAME FROM TABLES WHERE "
" SCHEMANAME=CURRENT_SCHEMA ")
rs = connection.execute(sql)
else:
sql = (" SELECT TABLENAME FROM TABLES WHERE "
" SCHEMANAME=? ")
matchname = self.identifier_preparer._denormalize_name(schema)
rs = connection.execute(sql, matchname)
normalize = self.identifier_preparer._normalize_name
return [normalize(row[0]) for row in rs]
def reflecttable(self, connection, table, include_columns):
denormalize = self.identifier_preparer._denormalize_name
normalize = self.identifier_preparer._normalize_name
st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, '
' NULLABLE, "DEFAULT", DEFAULTFUNCTION '
'FROM COLUMNS '
'WHERE TABLENAME=? AND SCHEMANAME=%s '
'ORDER BY POS')
fk = ('SELECT COLUMNNAME, FKEYNAME, '
' REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, '
' (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA '
' THEN 1 ELSE 0 END) AS in_schema '
'FROM FOREIGNKEYCOLUMNS '
'WHERE TABLENAME=? AND SCHEMANAME=%s '
'ORDER BY FKEYNAME ')
params = [denormalize(table.name)]
if not table.schema:
st = st % 'CURRENT_SCHEMA'
fk = fk % 'CURRENT_SCHEMA'
else:
st = st % '?'
fk = fk % '?'
params.append(denormalize(table.schema))
rows = connection.execute(st, params).fetchall()
if not rows:
raise exc.NoSuchTableError(table.fullname)
include_columns = set(include_columns or [])
for row in rows:
(name, mode, col_type, encoding, length, scale,
nullable, constant_def, func_def) = row
name = normalize(name)
if include_columns and name not in include_columns:
continue
type_args, type_kw = [], {}
if col_type == 'FIXED':
type_args = length, scale
# Convert FIXED(10) DEFAULT SERIAL to our Integer
if (scale == 0 and
func_def is not None and func_def.startswith('SERIAL')):
col_type = 'INTEGER'
type_args = length,
            elif col_type == 'FLOAT':
type_args = length,
elif col_type in ('CHAR', 'VARCHAR'):
type_args = length,
type_kw['encoding'] = encoding
elif col_type == 'LONG':
type_kw['encoding'] = encoding
try:
type_cls = ischema_names[col_type.lower()]
type_instance = type_cls(*type_args, **type_kw)
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(col_type, name))
type_instance = sqltypes.NullType
col_kw = {'autoincrement': False}
col_kw['nullable'] = (nullable == 'YES')
col_kw['primary_key'] = (mode == 'KEY')
if func_def is not None:
if func_def.startswith('SERIAL'):
if col_kw['primary_key']:
# No special default- let the standard autoincrement
# support handle SERIAL pk columns.
col_kw['autoincrement'] = True
else:
# strip current numbering
col_kw['server_default'] = schema.DefaultClause(
sql.text('SERIAL'))
col_kw['autoincrement'] = True
else:
col_kw['server_default'] = schema.DefaultClause(
sql.text(func_def))
elif constant_def is not None:
col_kw['server_default'] = schema.DefaultClause(sql.text(
"'%s'" % constant_def.replace("'", "''")))
table.append_column(schema.Column(name, type_instance, **col_kw))
fk_sets = itertools.groupby(connection.execute(fk, params),
lambda row: row.FKEYNAME)
for fkeyname, fkey in fk_sets:
fkey = list(fkey)
if include_columns:
key_cols = set([r.COLUMNNAME for r in fkey])
if key_cols != include_columns:
continue
columns, referants = [], []
quote = self.identifier_preparer._maybe_quote_identifier
for row in fkey:
columns.append(normalize(row.COLUMNNAME))
if table.schema or not row.in_schema:
referants.append('.'.join(
[quote(normalize(row[c]))
for c in ('REFSCHEMANAME', 'REFTABLENAME',
'REFCOLUMNNAME')]))
else:
referants.append('.'.join(
[quote(normalize(row[c]))
for c in ('REFTABLENAME', 'REFCOLUMNNAME')]))
constraint_kw = {'name': fkeyname.lower()}
if fkey[0].RULE is not None:
rule = fkey[0].RULE
if rule.startswith('DELETE '):
rule = rule[7:]
constraint_kw['ondelete'] = rule
table_kw = {}
if table.schema or not row.in_schema:
table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME)
ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME),
table_kw.get('schema'))
if ref_key not in table.metadata.tables:
schema.Table(normalize(fkey[0].REFTABLENAME),
table.metadata,
autoload=True, autoload_with=connection,
**table_kw)
constraint = schema.ForeignKeyConstraint(
columns, referants, link_to_name=True,
**constraint_kw)
table.append_constraint(constraint)
def has_sequence(self, connection, name):
# [ticket:726] makes this schema-aware.
denormalize = self.identifier_preparer._denormalize_name
sql = ("SELECT sequence_name FROM SEQUENCES "
"WHERE SEQUENCE_NAME=? ")
rp = connection.execute(sql, denormalize(name))
return bool(rp.first())
def _autoserial_column(table):
"""Finds the effective DEFAULT SERIAL column of a Table, if any."""
for index, col in enumerate(table.primary_key.columns):
if (isinstance(col.type, (sqltypes.Integer, sqltypes.Numeric)) and
col.autoincrement):
if isinstance(col.default, schema.Sequence):
if col.default.optional:
return index, col
elif (col.default is None or
(not isinstance(col.server_default, schema.DefaultClause))):
return index, col
return None, None
| |
# Copyright 2016 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from SparseArray import SparseArray
from nose.tools import assert_almost_equals
from random import random
import math
try:
from math import isfinite
except ImportError:
def isfinite(x):
if math.isinf(x) or math.isnan(x):
return False
return True
def random_lst(size=100, p=0.5, neg=True):
lst = []
for i in range(size):
if random() < p:
c = 1 if neg and random() < 0.5 else -1
if not neg:
c = 1
lst.append(random() * c)
else:
lst.append(0)
return lst
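# Hedged illustration (the helper name is illustrative and this is not part of
# the library's own suite): build a SparseArray from a dense list and inspect
# the attributes the tests below rely on.
def example_sparsearray_roundtrip():
    lst = random_lst(size=10, p=0.5)
    arr = SparseArray.fromlist(lst)
    # data holds only the non-zero values, index their positions, and
    # full_array() expands back to a dense list with one entry per position.
    assert arr.non_zero == len([x for x in lst if x != 0])
    assert len(arr.full_array()) == len(lst)
    return arr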
def test_fromlist():
lst = random_lst()
array = SparseArray.fromlist(lst)
[assert_almost_equals(a, b) for a, b in zip([x for x in lst if x != 0],
array.data)]
def test_len_size():
lst = random_lst()
array = SparseArray.fromlist(lst)
assert len(array) == array.size()
def test_index_data():
lst = random_lst()
array = SparseArray.index_data([(k, v) for k, v in enumerate(lst) if v != 0],
len(lst))
[assert_almost_equals(a, b) for a, b in zip([x for x in lst if x != 0],
array.data)]
def test_empty():
array = SparseArray.empty(100, 10)
assert len(array) == 100
assert len(array.index) == 10
def test_two_args():
from math import atan2, hypot, pow
def add(a, b):
return a + b
for f, name in zip([add, atan2, hypot, pow],
['add', 'atan2', 'hypot', 'pow']):
for p in [0.5, 1]:
a = random_lst(p=p, neg=False)
b = random_lst(p=p, neg=False)
a[10] = 12.433
b[10] = -12.433
c = getattr(SparseArray.fromlist(a),
name)(SparseArray.fromlist(b))
res = [f(x, y) for x, y in zip(a, b)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
print(c.non_zero, len(res), f)
print(c.data)
print(res, len(res))
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_sub():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
a[10] = 12.433
b[10] = 12.433
c = SparseArray.fromlist(a) - SparseArray.fromlist(b)
res = [x - y for x, y in zip(a, b)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_min():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
a[10] = 12.433
b[10] = 0
c = SparseArray.fromlist(a).min(SparseArray.fromlist(b))
res = [min(x, y) for x, y in zip(a, b)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_max():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
a[10] = 12.433
b[10] = 0
c = SparseArray.fromlist(a).max(SparseArray.fromlist(b))
res = [max(x, y) for x, y in zip(a, b)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_div():
def div(a, b):
try:
return a / b
except ZeroDivisionError:
if a == b:
return float('nan')
return float('inf')
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
c = SparseArray.fromlist(a) / SparseArray.fromlist(b)
res = [div(x, y) for x, y in zip(a, b)]
print(c.data)
print(res)
index = [k for k, v in enumerate(res) if v != 0] # and (a[k] != 0 or b[k] != 0)]
res = [res[x] for x in index]
assert c.non_zero == len(res)
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
[assert_almost_equals(v, w) for v, w in zip([x for x in res if not math.isnan(x)],
[x for x in c.data if not math.isnan(x)])]
def test_one():
from math import sin, cos, tan, asin, acos, atan
from math import sinh, cosh, tanh, asinh, acosh, atanh
from math import exp, expm1, log, log10, log1p, sqrt, lgamma
from math import fabs, ceil, floor, trunc, erf, erfc
try:
from math import log2
except ImportError:
def log2(x):
return log(x) / log(2)
def wrapper(f, v):
try:
return f(v)
except ValueError:
if f == sqrt:
return float('nan')
if v >= 0:
return float('inf')
else:
return -float('inf')
def compare(a, b):
if isfinite(a) and isfinite(b):
return assert_almost_equals(a, b)
return str(a) == str(b)
for f in [sin, cos, tan, asin, acos, atan,
sinh, cosh, tanh, asinh, acosh, atanh,
exp, expm1, log, log2, log10, log1p, sqrt,
lgamma,
fabs, ceil, floor, trunc,
erf, erfc]:
for p in [0.5, 1]:
a = random_lst(p=p)
b = SparseArray.fromlist(a)
c = getattr(b, f.__name__)()
res = [wrapper(f, x) for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
print(f, p, c.non_zero, len(res))
assert c.non_zero == len(res)
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
[compare(v, w) for v, w in zip(res,
c.data)]
def test_mul():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
c = SparseArray.fromlist(a) * SparseArray.fromlist(b)
res = [x * y for x, y in zip(a, b)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_isfinite():
a = SparseArray.fromlist(random_lst(p=0.5))
b = SparseArray.fromlist(random_lst(p=0.5))
c = a / b
assert a.isfinite()
assert b.isfinite()
assert not c.isfinite()
def test_sum2():
a = random_lst(p=0.5)
res = sum([x for x in a])
c = SparseArray.fromlist(a).sum()
assert res == c
def test_sq():
a = random_lst(p=0.5)
res = [x**2 for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
c = SparseArray.fromlist(a).sq()
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_sign():
def sign(a):
if a > 0:
return 1
elif a < 0:
return -1
return 0
a = random_lst(p=0.5)
res = [sign(x) for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
c = SparseArray.fromlist(a).sign()
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_unit_vector():
from math import sqrt
a = random_lst(p=0.5)
norm = sqrt(sum([x**2 for x in a]))
res = [x / norm for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
c = SparseArray.fromlist(a).unit_vector()
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_finite():
a = SparseArray.fromlist(random_lst(p=0.5))
b = SparseArray.fromlist(random_lst(p=0.5))
c = a / b
res = [i for i in c.data if isfinite(i)]
d = c.finite()
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
d.data)]
def test_cumsum():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
c = random_lst(p=p)
d = SparseArray.cumsum([SparseArray.fromlist(a),
SparseArray.fromlist(b),
SparseArray.fromlist(c)])
res = [x + y + z for x, y, z in zip(a, b, c)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert d.non_zero == len(res)
assert len(d.data) == d.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
d.index)]
print(d.non_zero, len(d.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
d.data)]
def test_cummul():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
c = random_lst(p=p)
d = SparseArray.cummul([SparseArray.fromlist(a),
SparseArray.fromlist(b),
SparseArray.fromlist(c)])
res = [x * y * z for x, y, z in zip(a, b, c)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert d.non_zero == len(res)
assert len(d.data) == d.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
d.index)]
print(d.non_zero, len(d.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
d.data)]
def test_cummin():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
c = random_lst(p=p)
d = SparseArray.cummin([SparseArray.fromlist(a),
SparseArray.fromlist(b),
SparseArray.fromlist(c)])
res = [min([x, y, z]) for x, y, z in zip(a, b, c)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert d.non_zero == len(res)
assert len(d.data) == d.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
d.index)]
print(d.non_zero, len(d.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
d.data)]
def test_cummax():
for p in [0.5, 1]:
a = random_lst(p=p)
b = random_lst(p=p)
c = random_lst(p=p)
d = SparseArray.cummax([SparseArray.fromlist(a),
SparseArray.fromlist(b),
SparseArray.fromlist(c)])
res = [max([x, y, z]) for x, y, z in zip(a, b, c)]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert d.non_zero == len(res)
assert len(d.data) == d.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
d.index)]
print(d.non_zero, len(d.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
d.data)]
def test_mul_const():
for k in [32.4, 0]:
for p in [0.5, 1]:
a = random_lst(p=p)
b = k
c = SparseArray.fromlist(a) * b
res = [x * b for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_mul_const2():
for k in [32.4, 0]:
for p in [0.5, 1]:
a = random_lst(p=p)
b = k
c = b * SparseArray.fromlist(a)
res = [x * b for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_sum_const():
for k in [32.4, 0]:
for p in [0.5, 1]:
a = random_lst(p=p)
b = k
c = SparseArray.fromlist(a) + b
res = [x + b for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_sum_const2():
for k in [32.4, 0]:
for p in [0.5, 1]:
a = random_lst(p=p)
b = k
c = b + SparseArray.fromlist(a)
res = [x + b for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_pickle():
import pickle
import tempfile
suno = SparseArray.fromlist(random_lst())
with tempfile.TemporaryFile('w+b') as io:
pickle.dump(suno, io)
io.seek(0)
s = pickle.load(io)
assert s.SSE(suno) == 0
def test_SSE():
a = random_lst(p=0.5)
b = random_lst(p=0.5)
res = sum([(x - y)**2 for x, y in zip(a, b)])
assert res == SparseArray.fromlist(a).SSE(SparseArray.fromlist(b))
def test_SAE():
from math import fabs
a = random_lst(p=0.5)
b = random_lst(p=0.5)
res = sum([fabs(x - y) for x, y in zip(a, b)])
assert res == SparseArray.fromlist(a).SAE(SparseArray.fromlist(b))
def test_density():
a = random_lst(p=0.5)
density = (len(a) - a.count(0)) / float(len(a))
b = SparseArray.fromlist(a)
assert b.density == density
def test_full_array():
a = random_lst(p=0.5)
b = SparseArray.fromlist(a)
[assert_almost_equals(v, w) for v, w in zip(a,
b.full_array())]
def test_used_maximum_memory():
a = random_lst(p=0.5)
b = SparseArray.fromlist(a)
assert b.used_memory
assert b.maximum_memory
def test_boundaries():
def boundaries(a):
if a > 1:
return 1
elif a < -1:
return -1
return a
a = random_lst(p=0.5)
a[10] = 100
a[11] = -32.3
res = [boundaries(x) for x in a]
index = [k for k, v in enumerate(res) if v != 0]
res = [x for x in res if x != 0]
c = SparseArray.fromlist(a).boundaries()
assert c.non_zero == len(res)
assert len(c.data) == c.non_zero
[assert_almost_equals(v, w) for v, w in zip(index,
c.index)]
print(c.non_zero, len(c.data), len([x for x in res if x != 0]))
[assert_almost_equals(v, w) for v, w in zip([x for x in res if x != 0],
c.data)]
def test_copy():
a = random_lst(p=0.5)
b = SparseArray.fromlist(a)
c = b.copy()
assert b.SSE(c) == 0
def test_dot():
for p in [0.5, 1]:
a = SparseArray.fromlist(random_lst(p=p))
b = SparseArray.fromlist(random_lst(p=p))
assert (a * b).sum() == a.dot(b)
def test_getitem():
for p in [0.5, 1]:
a = random_lst(p=p)
b = SparseArray.fromlist(a)
# print(a, b.full_array())
for k, v in enumerate(a):
print(k, v, b[k])
assert_almost_equals(v, b[k])
def test_finite_inplace():
a = SparseArray.fromlist(random_lst())
b = SparseArray.fromlist(random_lst())
c = a / b
d = c.finite()
c.finite(inplace=True)
assert c.isfinite()
assert c.SSE(d) == 0
assert len(c.index) == len(d.index)
def test_argmax():
try:
import numpy as np
except ImportError:
return
a = SparseArray.fromlist(random_lst())
b = SparseArray.fromlist(random_lst())
c = SparseArray.fromlist(random_lst())
todos = np.vstack([a.full_array(), b.full_array(),
c.full_array()])
res = todos.argmax(axis=0)
sp_res = SparseArray.argmax([a, b, c])
[assert_almost_equals(x, y) for x, y in zip(res, sp_res.full_array())]
def test_argmin():
try:
import numpy as np
except ImportError:
return
a = SparseArray.fromlist(random_lst())
b = SparseArray.fromlist(random_lst())
c = SparseArray.fromlist(random_lst())
todos = np.vstack([a.full_array(), b.full_array(),
c.full_array()])
res = todos.argmin(axis=0)
sp_res = SparseArray.argmin([a, b, c])
[assert_almost_equals(x, y) for x, y in zip(res, sp_res.full_array())]
def test_constant():
index = [x for x in range(0, 100, 10)]
a = SparseArray.constant(-2.3, index, 100)
for v, w in zip(a.index, index):
assert_almost_equals(v, w)
for v in a.data:
assert_almost_equals(v, -2.3)
assert len(a) == 100
assert a.non_zero == 10
def test_cosine_distance():
a = SparseArray.fromlist(random_lst())
b = SparseArray.fromlist(random_lst())
r = a.cosine_distance(b)
r1 = a.dot(b) / (math.sqrt(a.dot(a)) * math.sqrt(b.dot(b)))
assert_almost_equals(r, 1 - math.fabs(r1))
def test_pearson_coefficient():
a = SparseArray.fromlist(random_lst())
b = SparseArray.fromlist(random_lst())
r = a.pearson_coefficient(b)
num = len(a) * a.dot(b) - a.sum() * b.sum()
p = len(a) * a.dot(a) - math.pow(a.sum(), 2)
s = len(b) * b.dot(b) - math.pow(b.sum(), 2)
r1 = num / (math.sqrt(p) * math.sqrt(s))
assert_almost_equals(r, r1)
| |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
import six
from sahara import context
from sahara.i18n import _
from sahara.plugins import utils
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper
from sahara.plugins.vanilla.hadoop2 import utils as u
from sahara.plugins.vanilla import utils as vu
from sahara.service.castellan import utils as key_manager
from sahara.swift import swift_helper as swift
from sahara.topology import topology_helper as th
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import configs as s_cfg
from sahara.utils import files as f
from sahara.utils import proxy
from sahara.utils import xmlutils as x
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HADOOP_CONF_DIR = '/opt/hadoop/etc/hadoop'
OOZIE_CONF_DIR = '/opt/oozie/conf'
HIVE_CONF_DIR = '/opt/hive/conf'
HADOOP_USER = 'hadoop'
HADOOP_GROUP = 'hadoop'
PORTS_MAP = {
"namenode": [50070, 9000],
"secondarynamenode": [50090],
"resourcemanager": [8088, 8032],
"historyserver": [19888],
"datanode": [50010, 50075, 50020],
"nodemanager": [8042],
"oozie": [11000],
"hiveserver": [9999, 10000]
}
def configure_cluster(pctx, cluster):
LOG.debug("Configuring cluster")
if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
vu.get_hiveserver(cluster) and
c_helper.is_swift_enabled(pctx, cluster)):
cluster = proxy.create_proxy_user_for_cluster(cluster)
instances = utils.get_instances(cluster)
configure_instances(pctx, instances)
configure_topology_data(pctx, cluster)
configure_spark(cluster)
def configure_spark(cluster):
extra = _extract_spark_configs_to_extra(cluster)
_push_spark_configs_to_node(cluster, extra)
def _push_spark_configs_to_node(cluster, extra):
spark_master = vu.get_spark_history_server(cluster)
if spark_master:
_push_spark_configs_to_existing_node(spark_master, cluster, extra)
_push_cleanup_job(spark_master, extra)
with spark_master.remote() as r:
r.execute_command('sudo su - -c "mkdir /tmp/spark-events" hadoop')
def _push_spark_configs_to_existing_node(spark_master, cluster, extra):
sp_home = c_helper.get_spark_home(cluster)
files = {
os.path.join(sp_home,
'conf/spark-env.sh'): extra['sp_master'],
os.path.join(
sp_home,
'conf/spark-defaults.conf'): extra['sp_defaults']
}
with spark_master.remote() as r:
r.write_files_to(files, run_as_root=True)
def _push_cleanup_job(sp_master, extra):
with sp_master.remote() as r:
if extra['job_cleanup']['valid']:
r.write_file_to('/opt/hadoop/tmp-cleanup.sh',
extra['job_cleanup']['script'],
run_as_root=True)
r.execute_command("sudo chmod 755 /opt/hadoop/tmp-cleanup.sh")
cmd = 'sudo sh -c \'echo "%s" > /etc/cron.d/spark-cleanup\''
r.execute_command(cmd % extra['job_cleanup']['cron'])
else:
r.execute_command("sudo rm -f /opt/hadoop/tmp-cleanup.sh")
r.execute_command("sudo rm -f /etc/cron.d/spark-cleanup")
def _extract_spark_configs_to_extra(cluster):
sp_master = utils.get_instance(cluster, "spark history server")
extra = dict()
config_master = ''
if sp_master is not None:
config_master = c_helper.generate_spark_env_configs(cluster)
# Any node that might be used to run spark-submit will need
# these libs for swift integration
config_defaults = c_helper.generate_spark_executor_classpath(cluster)
extra['job_cleanup'] = c_helper.generate_job_cleanup_config(cluster)
extra['sp_master'] = config_master
extra['sp_defaults'] = config_defaults
return extra
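# Hedged illustration (shape only, not part of the plugin API): the dict
# returned by _extract_spark_configs_to_extra() looks roughly like
#   {'job_cleanup': {'valid': ..., 'script': ..., 'cron': ...},
#    'sp_master': '<contents for conf/spark-env.sh>',
#    'sp_defaults': '<contents for conf/spark-defaults.conf>'}
# which is exactly what _push_spark_configs_to_existing_node() and
# _push_cleanup_job() consume.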
def configure_instances(pctx, instances):
if len(instances) == 0:
return
cpo.add_provisioning_step(
instances[0].cluster_id, _("Configure instances"), len(instances))
for instance in instances:
with context.set_current_instance_id(instance.instance_id):
_configure_instance(pctx, instance)
@cpo.event_wrapper(True)
def _configure_instance(pctx, instance):
_provisioning_configs(pctx, instance)
_post_configuration(pctx, instance)
def _provisioning_configs(pctx, instance):
xmls, env = _generate_configs(pctx, instance)
_push_xml_configs(instance, xmls)
_push_env_configs(instance, env)
def _generate_configs(pctx, instance):
hadoop_xml_confs = _get_hadoop_configs(pctx, instance)
user_xml_confs, user_env_confs = _get_user_configs(
pctx, instance.node_group)
xml_confs = s_cfg.merge_configs(user_xml_confs, hadoop_xml_confs)
env_confs = s_cfg.merge_configs(pctx['env_confs'], user_env_confs)
return xml_confs, env_confs
def _get_hadoop_configs(pctx, instance):
cluster = instance.node_group.cluster
nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
dirs = _get_hadoop_dirs(instance)
confs = {
'Hadoop': {
'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
},
'HDFS': {
'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
}
}
res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
if res_hostname:
confs['YARN'] = {
'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
'yarn.resourcemanager.hostname': '%s' % res_hostname,
'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
HADOOP_CONF_DIR),
'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
HADOOP_CONF_DIR)
}
confs['MapReduce'] = {
'mapreduce.framework.name': 'yarn'
}
hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
if hs_hostname:
confs['MapReduce']['mapreduce.jobhistory.address'] = (
"%s:10020" % hs_hostname)
oozie = vu.get_oozie(cluster)
if oozie:
hadoop_cfg = {
'hadoop.proxyuser.hadoop.hosts': '*',
'hadoop.proxyuser.hadoop.groups': 'hadoop'
}
confs['Hadoop'].update(hadoop_cfg)
oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
if c_helper.is_mysql_enabled(pctx, cluster):
oozie_cfg.update(o_helper.get_oozie_mysql_configs(cluster))
confs['JobFlow'] = oozie_cfg
if c_helper.is_swift_enabled(pctx, cluster):
swift_configs = {}
for config in swift.get_swift_configs():
swift_configs[config['name']] = config['value']
confs['Hadoop'].update(swift_configs)
if c_helper.is_data_locality_enabled(pctx, cluster):
confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
confs['Hadoop'].update({"topology.script.file.name":
HADOOP_CONF_DIR + "/topology.sh"})
hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
if hive_hostname:
hive_pass = u.get_hive_password(cluster)
hive_cfg = {
'hive.warehouse.subdir.inherit.perms': True,
'javax.jdo.option.ConnectionURL':
'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
}
if c_helper.is_mysql_enabled(pctx, cluster):
hive_cfg.update({
'javax.jdo.option.ConnectionURL':
'jdbc:mysql://%s/metastore' % hive_hostname,
'javax.jdo.option.ConnectionDriverName':
'com.mysql.jdbc.Driver',
'javax.jdo.option.ConnectionUserName': 'hive',
'javax.jdo.option.ConnectionPassword': hive_pass,
'datanucleus.autoCreateSchema': 'false',
'datanucleus.fixedDatastore': 'true',
'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
})
proxy_configs = cluster.cluster_configs.get('proxy_configs')
if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
hive_cfg.update({
swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
swift.HADOOP_SWIFT_PASSWORD: key_manager.get_secret(
proxy_configs['proxy_password']),
swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
})
confs['Hive'] = hive_cfg
return confs
def _get_user_configs(pctx, node_group):
ng_xml_confs, ng_env_confs = _separate_configs(node_group.node_configs,
pctx['env_confs'])
cl_xml_confs, cl_env_confs = _separate_configs(
node_group.cluster.cluster_configs, pctx['env_confs'])
xml_confs = s_cfg.merge_configs(cl_xml_confs, ng_xml_confs)
env_confs = s_cfg.merge_configs(cl_env_confs, ng_env_confs)
return xml_confs, env_confs
def _separate_configs(configs, all_env_configs):
xml_configs = {}
env_configs = {}
for service, params in six.iteritems(configs):
for param, value in six.iteritems(params):
if all_env_configs.get(service, {}).get(param):
if not env_configs.get(service):
env_configs[service] = {}
env_configs[service][param] = value
else:
if not xml_configs.get(service):
xml_configs[service] = {}
xml_configs[service][param] = value
return xml_configs, env_configs
def _generate_xml(configs):
xml_confs = {}
for service, confs in six.iteritems(configs):
xml_confs[service] = x.create_hadoop_xml(confs)
return xml_confs
def _push_env_configs(instance, configs):
nn_heap = configs['HDFS']['NameNode Heap Size']
snn_heap = configs['HDFS']['SecondaryNameNode Heap Size']
dn_heap = configs['HDFS']['DataNode Heap Size']
rm_heap = configs['YARN']['ResourceManager Heap Size']
nm_heap = configs['YARN']['NodeManager Heap Size']
hs_heap = configs['MapReduce']['JobHistoryServer Heap Size']
with instance.remote() as r:
r.replace_remote_string(
'%s/hadoop-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_NAMENODE_OPTS=.*',
'export HADOOP_NAMENODE_OPTS="-Xmx%dm"' % nn_heap)
r.replace_remote_string(
'%s/hadoop-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_SECONDARYNAMENODE_OPTS=.*',
'export HADOOP_SECONDARYNAMENODE_OPTS="-Xmx%dm"' % snn_heap)
r.replace_remote_string(
'%s/hadoop-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_DATANODE_OPTS=.*',
'export HADOOP_DATANODE_OPTS="-Xmx%dm"' % dn_heap)
r.replace_remote_string(
'%s/yarn-env.sh' % HADOOP_CONF_DIR,
'\\#export YARN_RESOURCEMANAGER_HEAPSIZE=.*',
'export YARN_RESOURCEMANAGER_HEAPSIZE=%d' % rm_heap)
r.replace_remote_string(
'%s/yarn-env.sh' % HADOOP_CONF_DIR,
'\\#export YARN_NODEMANAGER_HEAPSIZE=.*',
'export YARN_NODEMANAGER_HEAPSIZE=%d' % nm_heap)
r.replace_remote_string(
'%s/mapred-env.sh' % HADOOP_CONF_DIR,
'export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=.*',
'export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=%d' % hs_heap)
def _push_xml_configs(instance, configs):
xmls = _generate_xml(configs)
service_to_conf_map = {
'Hadoop': '%s/core-site.xml' % HADOOP_CONF_DIR,
'HDFS': '%s/hdfs-site.xml' % HADOOP_CONF_DIR,
'YARN': '%s/yarn-site.xml' % HADOOP_CONF_DIR,
'MapReduce': '%s/mapred-site.xml' % HADOOP_CONF_DIR,
'JobFlow': '%s/oozie-site.xml' % OOZIE_CONF_DIR,
'Hive': '%s/hive-site.xml' % HIVE_CONF_DIR
}
xml_confs = {}
for service, confs in six.iteritems(xmls):
if service not in service_to_conf_map.keys():
continue
xml_confs[service_to_conf_map[service]] = confs
_push_configs_to_instance(instance, xml_confs)
def _push_configs_to_instance(instance, configs):
LOG.debug("Push configs to instance {instance}".format(
instance=instance.instance_name))
with instance.remote() as r:
for fl, data in six.iteritems(configs):
r.write_file_to(fl, data, run_as_root=True)
def _post_configuration(pctx, instance):
dirs = _get_hadoop_dirs(instance)
args = {
'hadoop_user': HADOOP_USER,
'hadoop_group': HADOOP_GROUP,
'hadoop_conf_dir': HADOOP_CONF_DIR,
'oozie_conf_dir': OOZIE_CONF_DIR,
'hadoop_name_dirs': " ".join(dirs['hadoop_name_dirs']),
'hadoop_data_dirs': " ".join(dirs['hadoop_data_dirs']),
'hadoop_log_dir': dirs['hadoop_log_dir'],
'hadoop_secure_dn_log_dir': dirs['hadoop_secure_dn_log_dir'],
'yarn_log_dir': dirs['yarn_log_dir']
}
post_conf_script = f.get_file_text(
'plugins/vanilla/hadoop2/resources/post_conf.template')
post_conf_script = post_conf_script.format(**args)
with instance.remote() as r:
r.write_file_to('/tmp/post_conf.sh', post_conf_script)
r.execute_command('chmod +x /tmp/post_conf.sh')
r.execute_command('sudo /tmp/post_conf.sh')
if c_helper.is_data_locality_enabled(pctx,
instance.cluster):
t_script = HADOOP_CONF_DIR + '/topology.sh'
r.write_file_to(t_script, f.get_file_text(
'plugins/vanilla/hadoop2/resources/topology.sh'),
run_as_root=True)
r.execute_command('chmod +x ' + t_script, run_as_root=True)
def _get_hadoop_dirs(instance):
dirs = {}
storage_paths = instance.storage_paths()
dirs['hadoop_name_dirs'] = _make_hadoop_paths(
storage_paths, '/hdfs/namenode')
dirs['hadoop_data_dirs'] = _make_hadoop_paths(
storage_paths, '/hdfs/datanode')
dirs['hadoop_log_dir'] = _make_hadoop_paths(
storage_paths, '/hadoop/logs')[0]
dirs['hadoop_secure_dn_log_dir'] = _make_hadoop_paths(
storage_paths, '/hadoop/logs/secure')[0]
dirs['yarn_log_dir'] = _make_hadoop_paths(
storage_paths, '/yarn/logs')[0]
return dirs
def _make_hadoop_paths(paths, hadoop_dir):
return [path + hadoop_dir for path in paths]
@cpo.event_wrapper(
True, step=_("Configure topology data"), param=('cluster', 1))
def configure_topology_data(pctx, cluster):
if c_helper.is_data_locality_enabled(pctx, cluster):
LOG.warning("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False explicitly")
tpl_map = th.generate_topology_map(cluster, is_node_awareness=False)
topology_data = "\n".join(
[k + " " + v for k, v in tpl_map.items()]) + "\n"
for ng in cluster.node_groups:
for i in ng.instances:
i.remote().write_file_to(HADOOP_CONF_DIR + "/topology.data",
topology_data, run_as_root=True)
def get_open_ports(node_group):
ports = []
for key in PORTS_MAP:
if key in node_group.node_processes:
ports += PORTS_MAP[key]
return ports
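# Hedged usage sketch (the fake node group below is purely illustrative): any
# object exposing a node_processes list works; the result is the union of the
# PORTS_MAP entries for the processes present, in no particular order.
#
#   class _FakeNodeGroup(object):
#       node_processes = ['namenode', 'datanode']
#
#   get_open_ports(_FakeNodeGroup())
#   # -> the five ports from PORTS_MAP['namenode'] + PORTS_MAP['datanode']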
| |
"""
This module defines the mpf, mpc classes, and standard functions for
operating with them.
"""
__docformat__ = 'plaintext'
import re
from .ctx_base import StandardBaseContext
from .libmp.backend import basestring, BACKEND
from . import libmp
from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
ComplexResult, to_pickable, from_pickable, normalize,
from_int, from_float, from_str, to_int, to_float, to_str,
from_rational, from_man_exp,
fone, fzero, finf, fninf, fnan,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
mpf_hash, mpf_rand,
mpf_sum,
bitcount, to_fixed,
mpc_to_str,
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_mpf_div,
mpf_pow,
mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
    mpf_glaisher, mpf_twinprime, mpf_mertens)
from . import function_docs
from . import rational
new = object.__new__
get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?)??'
r'(?P<im>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?j)?\)?$')
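# Hedged illustration (not part of mpmath's API): the pattern above splits a
# complex literal into its parts, e.g. for '(1.5+2.5j)' the 're' group
# captures '1.5' and the 'im' group captures '+2.5j'; either group may be
# absent (None) for purely real or purely imaginary input.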
if BACKEND == 'sage':
from sage.libs.mpmath.ext_main import Context as BaseMPContext
# pickle hack
import sage.libs.mpmath.ext_main as _mpf_module
else:
from .ctx_mp_python import PythonMPContext as BaseMPContext
from . import ctx_mp_python as _mpf_module
from .ctx_mp_python import _mpf, _mpc, mpnumeric
class MPContext(BaseMPContext, StandardBaseContext):
"""
Context for multiprecision arithmetic with a global precision.
"""
def __init__(ctx):
BaseMPContext.__init__(ctx)
ctx.trap_complex = False
ctx.pretty = False
ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
ctx._mpq = rational.mpq
ctx.default()
StandardBaseContext.__init__(ctx)
ctx.mpq = rational.mpq
ctx.init_builtins()
ctx.hyp_summators = {}
ctx._init_aliases()
# XXX: automate
try:
ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
ctx.primepi.im_func.func_doc = function_docs.primepi
ctx.psi.im_func.func_doc = function_docs.psi
ctx.atan2.im_func.func_doc = function_docs.atan2
except AttributeError:
# python 3
ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
ctx.primepi.__func__.func_doc = function_docs.primepi
ctx.psi.__func__.func_doc = function_docs.psi
ctx.atan2.__func__.func_doc = function_docs.atan2
ctx.digamma.func_doc = function_docs.digamma
ctx.cospi.func_doc = function_docs.cospi
ctx.sinpi.func_doc = function_docs.sinpi
def init_builtins(ctx):
mpf = ctx.mpf
mpc = ctx.mpc
# Exact constants
ctx.one = ctx.make_mpf(fone)
ctx.zero = ctx.make_mpf(fzero)
ctx.j = ctx.make_mpc((fzero,fone))
ctx.inf = ctx.make_mpf(finf)
ctx.ninf = ctx.make_mpf(fninf)
ctx.nan = ctx.make_mpf(fnan)
eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
"epsilon of working precision", "eps")
ctx.eps = eps
# Approximate constants
ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
# Standard functions
ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
ctx.gamma_old = ctx._wrap_libmp_function(libmp.mpf_gamma_old, libmp.mpc_gamma_old)
ctx.fac_old = ctx.factorial_old = ctx._wrap_libmp_function(libmp.mpf_factorial_old, libmp.mpc_factorial_old)
ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
# Faster versions
ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt)
ctx.exp = getattr(ctx, "_sage_exp", ctx.exp)
ctx.ln = getattr(ctx, "_sage_ln", ctx.ln)
ctx.cos = getattr(ctx, "_sage_cos", ctx.cos)
ctx.sin = getattr(ctx, "_sage_sin", ctx.sin)
def to_fixed(ctx, x, prec):
return x.to_fixed(prec)
def hypot(ctx, x, y):
r"""
Computes the Euclidean norm of the vector `(x, y)`, equal
to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
x = ctx.convert(x)
y = ctx.convert(y)
return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding))
def _gamma_upper_int(ctx, n, z):
n = int(ctx._re(n))
if n == 0:
return ctx.e1(z)
if not hasattr(z, '_mpf_'):
raise NotImplementedError
prec, rounding = ctx._prec_rounding
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True)
if imag is None:
return ctx.make_mpf(real)
else:
return ctx.make_mpc((real, imag))
def _expint_int(ctx, n, z):
n = int(n)
if n == 1:
return ctx.e1(z)
if not hasattr(z, '_mpf_'):
raise NotImplementedError
prec, rounding = ctx._prec_rounding
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding)
if imag is None:
return ctx.make_mpf(real)
else:
return ctx.make_mpc((real, imag))
def _nthroot(ctx, x, n):
if hasattr(x, '_mpf_'):
try:
return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
except ComplexResult:
if ctx.trap_complex:
raise
x = (x._mpf_, libmp.fzero)
else:
x = x._mpc_
return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
def _besselj(ctx, n, z):
prec, rounding = ctx._prec_rounding
if hasattr(z, '_mpf_'):
return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
elif hasattr(z, '_mpc_'):
return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
def _agm(ctx, a, b=1):
prec, rounding = ctx._prec_rounding
if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
try:
v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
return ctx.make_mpf(v)
except ComplexResult:
pass
if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
else: a = a._mpc_
if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
else: b = b._mpc_
return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
def bernoulli(ctx, n):
return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding))
def _zeta_int(ctx, n):
return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding))
def atan2(ctx, y, x):
x = ctx.convert(x)
y = ctx.convert(y)
return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding))
def psi(ctx, m, z):
z = ctx.convert(z)
m = int(m)
if ctx._is_real_type(z):
return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding))
else:
return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding))
def cos_sin(ctx, x, **kwargs):
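# Compute cos(x) and sin(x) together: a single libmp call is used when x is an
# mpf or mpc, otherwise we fall back to separate ctx.cos/ctx.sin evaluations.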
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._parse_prec(kwargs)
if hasattr(x, '_mpf_'):
c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
return ctx.make_mpf(c), ctx.make_mpf(s)
elif hasattr(x, '_mpc_'):
c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
return ctx.make_mpc(c), ctx.make_mpc(s)
else:
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def cospi_sinpi(ctx, x, **kwargs):
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._parse_prec(kwargs)
if hasattr(x, '_mpf_'):
c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
return ctx.make_mpf(c), ctx.make_mpf(s)
elif hasattr(x, '_mpc_'):
c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
return ctx.make_mpc(c), ctx.make_mpc(s)
else:
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
def clone(ctx):
"""
Create a copy of the context, with the same working precision.
"""
a = ctx.__class__()
a.prec = ctx.prec
return a
# Several helper methods
# TODO: add more of these, make consistent, write docstrings, ...
def _is_real_type(ctx, x):
if hasattr(x, '_mpc_') or type(x) is complex:
return False
return True
def _is_complex_type(ctx, x):
if hasattr(x, '_mpc_') or type(x) is complex:
return True
return False
def isnan(ctx, x):
"""
Return *True* if *x* is a NaN (not-a-number), or for a complex
number, whether either the real or imaginary part is NaN;
otherwise return *False*::
>>> from mpmath import *
>>> isnan(3.14)
False
>>> isnan(nan)
True
>>> isnan(mpc(3.14,2.72))
False
>>> isnan(mpc(3.14,nan))
True
"""
if hasattr(x, "_mpf_"):
return x._mpf_ == fnan
if hasattr(x, "_mpc_"):
return fnan in x._mpc_
if isinstance(x, int_types) or isinstance(x, rational.mpq):
return False
x = ctx.convert(x)
if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
return ctx.isnan(x)
raise TypeError("isnan() needs a number as input")
def isfinite(ctx, x):
"""
Return *True* if *x* is a finite number, i.e. neither
an infinity nor a NaN.
>>> from mpmath import *
>>> isfinite(inf)
False
>>> isfinite(-inf)
False
>>> isfinite(3)
True
>>> isfinite(nan)
False
>>> isfinite(3+4j)
True
>>> isfinite(mpc(3,inf))
False
>>> isfinite(mpc(nan,3))
False
"""
if ctx.isinf(x) or ctx.isnan(x):
return False
return True
def isnpint(ctx, x):
"""
Determine if *x* is a nonpositive integer.
"""
if not x:
return True
if hasattr(x, '_mpf_'):
sign, man, exp, bc = x._mpf_
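# A set sign bit with a nonnegative exponent means x = -man * 2**exp is a
# negative integer (libmp mantissas are normalized to be odd); the x == 0
# case was already handled by the `not x` check above.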
return sign and exp >= 0
if hasattr(x, '_mpc_'):
return not x.imag and ctx.isnpint(x.real)
if type(x) in int_types:
return x <= 0
if isinstance(x, ctx.mpq):
p, q = x._mpq_
if not p:
return True
return q == 1 and p <= 0
return ctx.isnpint(ctx.convert(x))
def __str__(ctx):
lines = ["Mpmath settings:",
(" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
(" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
(" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
]
return "\n".join(lines)
@property
def _repr_digits(ctx):
return repr_dps(ctx._prec)
@property
def _str_digits(ctx):
return ctx._dps
def extraprec(ctx, n, normalize_output=False):
"""
The block
with extraprec(n):
<code>
increases the precision by n bits, executes <code>, and then
restores the precision.
extraprec(n)(f) returns a decorated version of the function f
that increases the working precision by n bits before execution,
and restores the parent precision afterwards. With
normalize_output=True, it rounds the return value to the parent
precision.
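A minimal usage sketch (illustration only; ``expm1_naive`` is a hypothetical
helper, and the global ``mp`` context from ``from mpmath import mp`` is assumed)::
    with mp.extraprec(20):
        y = mp.exp(mp.mpf('1e-10')) - 1
    @mp.extraprec(20, normalize_output=True)
    def expm1_naive(x):
        return mp.exp(x) - 1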
"""
return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
def extradps(ctx, n, normalize_output=False):
"""
This function is analogous to extraprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
def workprec(ctx, n, normalize_output=False):
"""
The block
with workprec(n):
<code>
sets the precision to n bits, executes <code>, and then restores
the precision.
workprec(n)(f) returns a decorated version of the function f
that sets the precision to n bits before execution,
and restores the precision afterwards. With normalize_output=True,
it rounds the return value to the parent precision.
"""
return PrecisionManager(ctx, lambda p: n, None, normalize_output)
def workdps(ctx, n, normalize_output=False):
"""
This function is analogous to workprec (see documentation)
but changes the decimal precision instead of the number of bits.
"""
return PrecisionManager(ctx, None, lambda d: n, normalize_output)
def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
"""
Return a wrapped copy of *f* that repeatedly evaluates *f*
with increasing precision until the result converges to the
full precision used at the point of the call.
This heuristically protects against rounding errors, at the cost of
roughly a 2x slowdown compared to manually setting the optimal
precision. This method can, however, easily be fooled if the results
from *f* depend "discontinuously" on the precision, for instance
if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
should be used judiciously.
**Examples**
Many functions are sensitive to perturbations of the input arguments.
If the arguments are decimal numbers, they may have to be converted
to binary at a much higher precision. If the amount of required
extra precision is unknown, :func:`~mpmath.autoprec` is convenient::
>>> from mpmath import *
>>> mp.dps = 15
>>> mp.pretty = True
>>> besselj(5, 125 * 10**28) # Exact input
-8.03284785591801e-17
>>> besselj(5, '1.25e30') # Bad
7.12954868316652e-16
>>> autoprec(besselj)(5, '1.25e30') # Good
-8.03284785591801e-17
The following fails to converge because `\sin(\pi) = 0` whereas all
finite-precision approximations of `\pi` give nonzero values::
>>> autoprec(sin)(pi)
Traceback (most recent call last):
...
NoConvergence: autoprec: prec increased to 2910 without convergence
As the following example shows, :func:`~mpmath.autoprec` can protect against
cancellation, but is fooled by too severe cancellation::
>>> x = 1e-10
>>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
1.00000008274037e-10
1.00000000005e-10
1.00000000005e-10
>>> x = 1e-50
>>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
0.0
1.0e-50
0.0
With *catch*, an exception or list of exceptions to intercept
may be specified. The raised exception is interpreted
as signaling insufficient precision. This permits, for example,
evaluating a function where a too low precision results in a
division by zero::
>>> f = lambda x: 1/(exp(x)-1)
>>> f(1e-30)
Traceback (most recent call last):
...
ZeroDivisionError
>>> autoprec(f, catch=ZeroDivisionError)(1e-30)
1.0e+30
"""
def f_autoprec_wrapped(*args, **kwargs):
prec = ctx.prec
if maxprec is None:
maxprec2 = ctx._default_hyper_maxprec(prec)
else:
maxprec2 = maxprec
try:
ctx.prec = prec + 10
try:
v1 = f(*args, **kwargs)
except catch:
v1 = ctx.nan
prec2 = prec + 20
while 1:
ctx.prec = prec2
try:
v2 = f(*args, **kwargs)
except catch:
v2 = ctx.nan
if v1 == v2:
break
err = ctx.mag(v2-v1) - ctx.mag(v2)
if err < (-prec):
break
if verbose:
print("autoprec: target=%s, prec=%s, accuracy=%s" \
% (prec, prec2, -err))
v1 = v2
if prec2 >= maxprec2:
raise ctx.NoConvergence(\
"autoprec: prec increased to %i without convergence"\
% prec2)
prec2 += int(prec2*2)
prec2 = min(prec2, maxprec2)
finally:
ctx.prec = prec
return +v2
return f_autoprec_wrapped
def nstr(ctx, x, n=6, **kwargs):
"""
Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
significant digits. The small default value for *n* is chosen to
make this function useful for printing collections of numbers
(lists, matrices, etc).
If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
to each element. For unrecognized classes, :func:`~mpmath.nstr`
simply returns ``str(x)``.
The companion function :func:`~mpmath.nprint` prints the result
instead of returning it.
>>> from mpmath import *
>>> nstr([+pi, ldexp(1,-500)])
'[3.14159, 3.05494e-151]'
>>> nprint([+pi, ldexp(1,-500)])
[3.14159, 3.05494e-151]
"""
if isinstance(x, list):
return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
if isinstance(x, tuple):
return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
if hasattr(x, '_mpf_'):
return to_str(x._mpf_, n, **kwargs)
if hasattr(x, '_mpc_'):
return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
if isinstance(x, basestring):
return repr(x)
if isinstance(x, ctx.matrix):
return x.__nstr__(n, **kwargs)
return str(x)
def _convert_fallback(ctx, x, strings):
if strings and isinstance(x, basestring):
if 'j' in x.lower():
x = x.lower().replace(' ', '')
match = get_complex.match(x)
re = match.group('re')
if not re:
re = 0
im = match.group('im').rstrip('j')
return ctx.mpc(ctx.convert(re), ctx.convert(im))
if hasattr(x, "_mpi_"):
a, b = x._mpi_
if a == b:
return ctx.make_mpf(a)
else:
raise ValueError("can only create mpf from zero-width interval")
raise TypeError("cannot create mpf from " + repr(x))
def mpmathify(ctx, *args, **kwargs):
return ctx.convert(*args, **kwargs)
def _parse_prec(ctx, kwargs):
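# Resolve a (prec, rounding) pair from keyword arguments: 'exact' (or an
# infinite 'prec'/'dps') maps to (0, 'f'), which the libmp routines interpret
# as exact arithmetic; otherwise 'prec', 'dps' and 'rounding' override the
# context defaults.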
if kwargs:
if kwargs.get('exact'):
return 0, 'f'
prec, rounding = ctx._prec_rounding
if 'rounding' in kwargs:
rounding = kwargs['rounding']
if 'prec' in kwargs:
prec = kwargs['prec']
if prec == ctx.inf:
return 0, 'f'
else:
prec = int(prec)
elif 'dps' in kwargs:
dps = kwargs['dps']
if dps == ctx.inf:
return 0, 'f'
prec = dps_to_prec(dps)
return prec, rounding
return ctx._prec_rounding
_exact_overflow_msg = "the exact result does not fit in memory"
_hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. Try with a higher maxprec,
maxterms, or set zeroprec."""
def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
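# Evaluate a generalized hypergeometric series pFq at z: coeffs holds the p
# upper and q lower parameters, flags encodes each parameter's type ('Z' marks
# an exact integer), and the working precision is increased until cancellation
# and jumps near negative-integer parameters are resolved.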
if hasattr(z, "_mpf_"):
key = p, q, flags, 'R'
v = z._mpf_
elif hasattr(z, "_mpc_"):
key = p, q, flags, 'C'
v = z._mpc_
if key not in ctx.hyp_summators:
ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
summator = ctx.hyp_summators[key]
prec = ctx.prec
maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
extraprec = 50
epsshift = 25
# Jumps in magnitude occur when parameters are close to negative
# integers. We must ensure that these terms are included in
# the sum and added accurately
magnitude_check = {}
max_total_jump = 0
for i, c in enumerate(coeffs):
if flags[i] == 'Z':
if i >= p and c <= 0:
ok = False
for ii, cc in enumerate(coeffs[:p]):
# Note: c <= cc or c < cc, depending on convention
if flags[ii] == 'Z' and cc <= 0 and c <= cc:
ok = True
if not ok:
raise ZeroDivisionError("pole in hypergeometric series")
continue
n, d = ctx.nint_distance(c)
n = -int(n)
d = -d
if i >= p and n >= 0 and d > 4:
if n in magnitude_check:
magnitude_check[n] += d
else:
magnitude_check[n] = d
extraprec = max(extraprec, d - prec + 60)
max_total_jump += abs(d)
while 1:
if extraprec > maxprec:
raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
wp = prec + extraprec
if magnitude_check:
mag_dict = dict((n,None) for n in magnitude_check)
else:
mag_dict = {}
zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
epsshift, mag_dict, **kwargs)
cancel = -magnitude
jumps_resolved = True
if extraprec < max_total_jump:
for n in mag_dict.values():
if (n is None) or (n < prec):
jumps_resolved = False
break
accurate = (cancel < extraprec-25-5 or not accurate_small)
if jumps_resolved:
if accurate:
break
# zero?
zeroprec = kwargs.get('zeroprec')
if zeroprec is not None:
if cancel > zeroprec:
if have_complex:
return ctx.mpc(0)
else:
return ctx.zero
# Some near-singularities were not included, so increase
# precision and repeat until they are
extraprec *= 2
# Possible workaround for bad roundoff in fixed-point arithmetic
epsshift += 5
extraprec += 5
if type(zv) is tuple:
if have_complex:
return ctx.make_mpc(zv)
else:
return ctx.make_mpf(zv)
else:
return zv
def ldexp(ctx, x, n):
r"""
Computes `x 2^n` efficiently. No rounding is performed.
The argument `x` must be a real floating-point number (or
possible to convert into one) and `n` must be a Python ``int``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> ldexp(1, 10)
mpf('1024.0')
>>> ldexp(1, -3)
mpf('0.125')
"""
x = ctx.convert(x)
return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
def frexp(ctx, x):
r"""
Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
`n` a Python integer, and such that `x = y 2^n`. No rounding is
performed.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> frexp(7.5)
(mpf('0.9375'), 3)
"""
x = ctx.convert(x)
y, n = libmp.mpf_frexp(x._mpf_)
return ctx.make_mpf(y), n
def fneg(ctx, x, **kwargs):
"""
Negates the number *x*, giving a floating-point result, optionally
using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
An mpmath number is returned::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fneg(2.5)
mpf('-2.5')
>>> fneg(-5+2j)
mpc(real='5.0', imag='-2.0')
Precise control over rounding is possible::
>>> x = fadd(2, 1e-100, exact=True)
>>> fneg(x)
mpf('-2.0')
>>> fneg(x, rounding='f')
mpf('-2.0000000000000004')
Negating with and without roundoff::
>>> n = 200000000000000000000001
>>> print(int(-mpf(n)))
-200000000000000016777216
>>> print(int(fneg(n)))
-200000000000000016777216
>>> print(int(fneg(n, prec=log(n,2)+1)))
-200000000000000000000001
>>> print(int(fneg(n, dps=log(n,10)+1)))
-200000000000000000000001
>>> print(int(fneg(n, prec=inf)))
-200000000000000000000001
>>> print(int(fneg(n, dps=inf)))
-200000000000000000000001
>>> print(int(fneg(n, exact=True)))
-200000000000000000000001
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
if hasattr(x, '_mpf_'):
return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fadd(ctx, x, y, **kwargs):
"""
Adds the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
The default precision is the working precision of the context.
You can specify a custom precision in bits by passing the *prec* keyword
argument, or by providing an equivalent decimal precision with the *dps*
keyword argument. If the precision is set to ``+inf``, or if the flag
*exact=True* is passed, an exact addition with no rounding is performed.
When the precision is finite, the optional *rounding* keyword argument
specifies the direction of rounding. Valid options are ``'n'`` for
nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
for down, ``'u'`` for up.
**Examples**
Using :func:`~mpmath.fadd` with precision and rounding control::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fadd(2, 1e-20)
mpf('2.0')
>>> fadd(2, 1e-20, rounding='u')
mpf('2.0000000000000004')
>>> nprint(fadd(2, 1e-20, prec=100), 25)
2.00000000000000000001
>>> nprint(fadd(2, 1e-20, dps=15), 25)
2.0
>>> nprint(fadd(2, 1e-20, dps=25), 25)
2.00000000000000000001
>>> nprint(fadd(2, 1e-20, exact=True), 25)
2.00000000000000000001
Exact addition avoids cancellation errors, enforcing familiar laws
of numbers such as `x+y-x = y`, which don't hold in floating-point
arithmetic with finite precision::
>>> x, y = mpf(2), mpf('1e-1000')
>>> print(x + y - x)
0.0
>>> print(fadd(x, y, prec=inf) - x)
1.0e-1000
>>> print(fadd(x, y, exact=True) - x)
1.0e-1000
Exact addition can be inefficient and may be impossible to perform
with large magnitude differences::
>>> fadd(1, '1e-100000000000000000000', prec=inf)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fsub(ctx, x, y, **kwargs):
"""
Subtracts the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
Using :func:`~mpmath.fsub` with precision and rounding control::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fsub(2, 1e-20)
mpf('2.0')
>>> fsub(2, 1e-20, rounding='d')
mpf('1.9999999999999998')
>>> nprint(fsub(2, 1e-20, prec=100), 25)
1.99999999999999999999
>>> nprint(fsub(2, 1e-20, dps=15), 25)
2.0
>>> nprint(fsub(2, 1e-20, dps=25), 25)
1.99999999999999999999
>>> nprint(fsub(2, 1e-20, exact=True), 25)
1.99999999999999999999
Exact subtraction avoids cancellation errors, enforcing familiar laws
of numbers such as `x-y+y = x`, which don't hold in floating-point
arithmetic with finite precision::
>>> x, y = mpf(2), mpf('1e1000')
>>> print(x - y + y)
0.0
>>> print(fsub(x, y, prec=inf) + y)
2.0
>>> print(fsub(x, y, exact=True) + y)
2.0
Exact subtraction can be inefficient and may be impossible to perform
with large magnitude differences::
>>> fsub(1, '1e-100000000000000000000', prec=inf)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fmul(ctx, x, y, **kwargs):
"""
Multiplies the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
The result is an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fmul(2, 5.0)
mpf('10.0')
>>> fmul(0.5j, 0.5)
mpc(real='0.0', imag='0.25')
Avoiding roundoff::
>>> x, y = 10**10+1, 10**15+1
>>> print(x*y)
10000000001000010000000001
>>> print(mpf(x) * mpf(y))
1.0000000001e+25
>>> print(int(mpf(x) * mpf(y)))
10000000001000011026399232
>>> print(int(fmul(x, y)))
10000000001000011026399232
>>> print(int(fmul(x, y, dps=25)))
10000000001000010000000001
>>> print(int(fmul(x, y, exact=True)))
10000000001000010000000001
Exact multiplication with complex numbers can be inefficient and may
be impossible to perform with large magnitude differences between
real and imaginary parts::
>>> x = 1+2j
>>> y = mpc(2, '1e-100000000000000000000')
>>> fmul(x, y)
mpc(real='2.0', imag='4.0')
>>> fmul(x, y, rounding='u')
mpc(real='2.0', imag='4.0000000000000009')
>>> fmul(x, y, exact=True)
Traceback (most recent call last):
...
OverflowError: the exact result does not fit in memory
"""
prec, rounding = ctx._parse_prec(kwargs)
x = ctx.convert(x)
y = ctx.convert(y)
try:
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
except (ValueError, OverflowError):
raise OverflowError(ctx._exact_overflow_msg)
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def fdiv(ctx, x, y, **kwargs):
"""
Divides the numbers *x* and *y*, giving a floating-point result,
optionally using a custom precision and rounding mode.
See the documentation of :func:`~mpmath.fadd` for a detailed description
of how to specify precision and rounding.
**Examples**
The result is an mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fdiv(3, 2)
mpf('1.5')
>>> fdiv(2, 3)
mpf('0.66666666666666663')
>>> fdiv(2+4j, 0.5)
mpc(real='4.0', imag='8.0')
The rounding direction and precision can be controlled::
>>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits
mpf('0.6666259765625')
>>> fdiv(2, 3, rounding='d')
mpf('0.66666666666666663')
>>> fdiv(2, 3, prec=60)
mpf('0.66666666666666667')
>>> fdiv(2, 3, rounding='u')
mpf('0.66666666666666674')
Checking the error of a division by performing it at higher precision::
>>> fdiv(2, 3) - fdiv(2, 3, prec=100)
mpf('-3.7007434154172148e-17')
Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
allowed since the quotient of two floating-point numbers generally
does not have an exact floating-point representation. (In the
future this might be changed to allow the case where the division
is actually exact.)
>>> fdiv(2, 3, exact=True)
Traceback (most recent call last):
...
ValueError: division is not an exact operation
"""
prec, rounding = ctx._parse_prec(kwargs)
if not prec:
raise ValueError("division is not an exact operation")
x = ctx.convert(x)
y = ctx.convert(y)
if hasattr(x, '_mpf_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
if hasattr(x, '_mpc_'):
if hasattr(y, '_mpf_'):
return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
if hasattr(y, '_mpc_'):
return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
raise ValueError("Arguments need to be mpf or mpc compatible numbers")
def nint_distance(ctx, x):
r"""
Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
(measured in bits) lost to cancellation when computing `x-n`.
>>> from mpmath import *
>>> n, d = nint_distance(5)
>>> print(n); print(d)
5
-inf
>>> n, d = nint_distance(mpf(5))
>>> print(n); print(d)
5
-inf
>>> n, d = nint_distance(mpf(5.00000001))
>>> print(n); print(d)
5
-26
>>> n, d = nint_distance(mpf(4.99999999))
>>> print(n); print(d)
5
-26
>>> n, d = nint_distance(mpc(5,10))
>>> print(n); print(d)
5
4
>>> n, d = nint_distance(mpc(5,0.000001))
>>> print(n); print(d)
5
-19
"""
typx = type(x)
if typx in int_types:
return int(x), ctx.ninf
elif typx is rational.mpq:
p, q = x._mpq_
n, r = divmod(p, q)
if 2*r >= q:
n += 1
elif not r:
return n, ctx.ninf
# log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
d = bitcount(abs(p-n*q)) - bitcount(q)
return n, d
if hasattr(x, "_mpf_"):
re = x._mpf_
im_dist = ctx.ninf
elif hasattr(x, "_mpc_"):
re, im = x._mpc_
isign, iman, iexp, ibc = im
if iman:
im_dist = iexp + ibc
elif im == fzero:
im_dist = ctx.ninf
else:
raise ValueError("requires a finite number")
else:
x = ctx.convert(x)
if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
return ctx.nint_distance(x)
else:
raise TypeError("requires an mpf/mpc")
sign, man, exp, bc = re
mag = exp+bc
# |x| < 0.5
if mag < 0:
n = 0
re_dist = mag
elif man:
# exact integer
if exp >= 0:
n = man << exp
re_dist = ctx.ninf
# exact half-integer
elif exp == -1:
n = (man>>1)+1
re_dist = 0
else:
d = (-exp-1)
t = man >> d
if t & 1:
t += 1
man = (t<<d) - man
else:
man -= (t<<d)
n = t>>1 # int(t)>>1
re_dist = exp+bitcount(man)
if sign:
n = -n
elif re == fzero:
re_dist = ctx.ninf
n = 0
else:
raise ValueError("requires a finite number")
return n, max(re_dist, im_dist)
def fprod(ctx, factors):
r"""
Calculates a product containing a finite number of factors (for
infinite products, see :func:`~mpmath.nprod`). The factors will be
converted to mpmath numbers.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fprod([1, 2, 0.5, 7])
mpf('7.0')
"""
orig = ctx.prec
try:
v = ctx.one
for p in factors:
v *= p
finally:
ctx.prec = orig
return +v
def rand(ctx):
"""
Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
The number of randomly generated bits in the mantissa is equal
to the working precision.
"""
return ctx.make_mpf(mpf_rand(ctx._prec))
def fraction(ctx, p, q):
"""
Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
the fraction `p/q`. The value is updated with the precision.
>>> from mpmath import *
>>> mp.dps = 15
>>> a = fraction(1,100)
>>> b = mpf(1)/100
>>> print(a); print(b)
0.01
0.01
>>> mp.dps = 30
>>> print(a); print(b) # a will be accurate
0.01
0.0100000000000000002081668171172
>>> mp.dps = 15
"""
return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
'%s/%s' % (p, q))
def absmin(ctx, x):
return abs(ctx.convert(x))
def absmax(ctx, x):
return abs(ctx.convert(x))
def _as_points(ctx, x):
# XXX: remove this?
if hasattr(x, '_mpi_'):
a, b = x._mpi_
return [ctx.make_mpf(a), ctx.make_mpf(b)]
return x
'''
def _zetasum(ctx, s, a, b):
"""
Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
integers.
"""
a = int(a)
b = int(b)
s = ctx.convert(s)
prec, rounding = ctx._prec_rounding
if hasattr(s, '_mpf_'):
v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
elif hasattr(s, '_mpc_'):
v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
return v
'''
def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
if not (ctx.isint(a) and hasattr(s, "_mpc_")):
raise NotImplementedError
a = int(a)
prec = ctx._prec
xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
xs = [ctx.make_mpc(x) for x in xs]
ys = [ctx.make_mpc(y) for y in ys]
return xs, ys
class PrecisionManager:
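# Helper behind extraprec/extradps/workprec/workdps: usable both as a context
# manager (__enter__/__exit__) and as a decorator (__call__). It temporarily
# adjusts ctx.prec (via precfun) or ctx.dps (via dpsfun) and always restores
# the original precision afterwards; with normalize_output=True the wrapped
# function's return value is normalized (via unary +) before being returned.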
def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
self.ctx = ctx
self.precfun = precfun
self.dpsfun = dpsfun
self.normalize_output = normalize_output
def __call__(self, f):
def g(*args, **kwargs):
orig = self.ctx.prec
try:
if self.precfun:
self.ctx.prec = self.precfun(self.ctx.prec)
else:
self.ctx.dps = self.dpsfun(self.ctx.dps)
if self.normalize_output:
v = f(*args, **kwargs)
if type(v) is tuple:
return tuple([+a for a in v])
return +v
else:
return f(*args, **kwargs)
finally:
self.ctx.prec = orig
g.__name__ = f.__name__
g.__doc__ = f.__doc__
return g
def __enter__(self):
self.origp = self.ctx.prec
if self.precfun:
self.ctx.prec = self.precfun(self.ctx.prec)
else:
self.ctx.dps = self.dpsfun(self.ctx.dps)
def __exit__(self, exc_type, exc_val, exc_tb):
self.ctx.prec = self.origp
return False
if __name__ == '__main__':
import doctest
doctest.testmod()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import HTMLParser
import re
from pylons import tmpl_context as c, app_globals as g, config
from bson import ObjectId
import markupsafe
from allura.lib import helpers as h
from allura.lib.decorators import task
from allura.lib import mail_util
from allura.lib import exceptions as exc
log = logging.getLogger(__name__)
smtp_client = mail_util.SMTPClient()
def mail_meta_content(metalink):
'''
Helper function used to include a view action button in your email client
https://developers.google.com/gmail/markup/reference/go-to-action#view_action
:param metalink: url to the page the action button links to
'''
return h.html.literal("""\
<div itemscope itemtype="http://schema.org/EmailMessage">
<div itemprop="action" itemscope itemtype="http://schema.org/ViewAction">
<link itemprop="url" href="%s"></link>
<meta itemprop="name" content="View"></meta>
</div>
<meta itemprop="description" content="View"></meta>
</div>""" % metalink)
@task
def route_email(
peer, mailfrom, rcpttos, data):
'''
Route messages according to their destination:
<topic>@<mount_point>.<subproj2>.<subproj1>.<project>.projects.domain.net
gets sent to c.app.handle_message(topic, message)
'''
try:
msg = mail_util.parse_message(data)
except: # pragma no cover
log.exception('Parse Error: (%r,%r,%r)', peer, mailfrom, rcpttos)
return
if mail_util.is_autoreply(msg):
log.info('Skipping autoreply message: %s', msg['headers'])
return
mail_user = mail_util.identify_sender(peer, mailfrom, msg['headers'], msg)
with h.push_config(c, user=mail_user):
log.info('Received email from %s', c.user.username)
# For each of the addrs, determine the project/app and route
# appropriately
for addr in rcpttos:
try:
userpart, project, app = mail_util.parse_address(addr)
with h.push_config(c, project=project, app=app):
if not app.has_access(c.user, userpart):
log.info('Access denied for %s to mailbox %s',
c.user, userpart)
elif not c.app.config.options.get('AllowEmailPosting', True):
log.info("Posting from email is not enabled")
else:
if msg['multipart']:
msg_hdrs = msg['headers']
for part in msg['parts']:
if part.get('content_type', '').startswith('multipart/'):
continue
msg = dict(
headers=dict(msg_hdrs, **part['headers']),
message_id=part['message_id'],
in_reply_to=part['in_reply_to'],
references=part['references'],
filename=part['filename'],
content_type=part['content_type'],
payload=part['payload'])
c.app.handle_message(userpart, msg)
else:
c.app.handle_message(userpart, msg)
except exc.MailError, e:
log.error('Error routing email to %s: %s', addr, e)
except:
log.exception('Error routing mail to %s', addr)
def create_multipart_msg(text, metalink=None):
"""
Convert Markdown text to plaintext & HTML, combine into a multipart email Message
:param text:
:param metalink:
:return:
"""
def replace_html(matchobj):
text_within_div = matchobj.group(1)
text_within_div = text_within_div.replace('</p>', '\n')
text_within_div = markupsafe._striptags_re.sub('', text_within_div)
return text_within_div
plain_text = text
plain_text = re.sub(r'<div class="markdown_content">(.*)</div>', # strip HTML from markdown generated blocks
replace_html,
plain_text,
flags=re.DOTALL, # match newlines too
)
plain_text = HTMLParser.HTMLParser().unescape(plain_text) # put literal HTML tags back into plaintext
plain_msg = mail_util.encode_email_part(plain_text, 'plain')
html_text = g.forge_markdown(email=True).convert(text)
if metalink:
html_text = html_text + mail_meta_content(metalink)
html_msg = mail_util.encode_email_part(html_text, 'html')
multi_msg = mail_util.make_multipart_message(plain_msg, html_msg)
return multi_msg, plain_msg
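# A minimal usage sketch (hypothetical values; assumes an active app/request
# context so that g.forge_markdown is configured):
#   multi, plain = create_multipart_msg('**hello** world',
#                                       metalink='https://example.com/p/test/tickets/1/')
#   # multi is the multipart email Message, plain is the text/plain part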
@task
def sendmail(fromaddr, destinations, text, reply_to, subject,
message_id, in_reply_to=None, sender=None, references=None, metalink=None):
'''
Send an email to the specified list of destinations, honoring the preferred email format specified by each user.
It is best for broadcast messages.
:param fromaddr: ObjectId or str(ObjectId) of user, or email address str
'''
from allura import model as M
addrs_plain = []
addrs_multi = []
if fromaddr is None:
fromaddr = g.noreply
elif not isinstance(fromaddr, basestring) or '@' not in fromaddr:
log.warning('Looking up user with fromaddr: %s', fromaddr)
user = M.User.query.get(_id=ObjectId(fromaddr), disabled=False, pending=False)
if not user:
log.warning('Cannot find user with ID: %s', fromaddr)
fromaddr = g.noreply
else:
fromaddr = user.email_address_header()
# Divide addresses based on preferred email formats
for addr in destinations:
if mail_util.isvalid(addr):
addrs_plain.append(addr)
else:
try:
user = M.User.query.get(_id=ObjectId(addr), disabled=False, pending=False)
if not user:
log.warning('Cannot find user with ID: %s', addr)
continue
except:
log.exception('Error looking up user with ID: %r' % addr)
continue
addr = user.email_address_header()
if not addr and user.email_addresses:
addr = user.email_addresses[0]
log.warning(
'User %s has not set primary email address, using %s',
user._id, addr)
if not addr:
log.error(
"User %s (%s) has not set any email address, can't deliver",
user._id, user.username)
continue
if user.get_pref('email_format') == 'plain':
addrs_plain.append(addr)
else:
addrs_multi.append(addr)
multi_msg, plain_msg = create_multipart_msg(text, metalink)
smtp_client.sendmail(
addrs_multi, fromaddr, reply_to, subject, message_id,
in_reply_to, multi_msg, sender=sender, references=references)
smtp_client.sendmail(
addrs_plain, fromaddr, reply_to, subject, message_id,
in_reply_to, plain_msg, sender=sender, references=references)
@task
def sendsimplemail(
fromaddr,
toaddr,
text,
reply_to,
subject,
message_id,
in_reply_to=None,
sender=None,
references=None,
cc=None):
'''
Send a single mail to the specified address.
It is best for single user notifications.
:param fromaddr: ObjectId or str(ObjectId) of user, or email address str
:param toaddr: ObjectId or str(ObjectId) of user, or email address str
'''
from allura import model as M
if fromaddr is None:
fromaddr = g.noreply
elif not isinstance(fromaddr, basestring) or '@' not in fromaddr:
log.warning('Looking up user with fromaddr: %s', fromaddr)
user = M.User.query.get(_id=ObjectId(fromaddr), disabled=False, pending=False)
if not user:
log.warning('Cannot find user with ID: %s', fromaddr)
fromaddr = g.noreply
else:
fromaddr = user.email_address_header()
if not isinstance(toaddr, basestring) or '@' not in toaddr:
log.warning('Looking up user with toaddr: %s', toaddr)
user = M.User.query.get(_id=ObjectId(toaddr), disabled=False, pending=False)
if not user:
log.warning('Cannot find user with ID: %s', toaddr)
toaddr = g.noreply
else:
toaddr = user.email_address_header()
multi_msg, plain_msg = create_multipart_msg(text)
smtp_client.sendmail(
[toaddr], fromaddr, reply_to, subject, message_id,
in_reply_to, multi_msg, sender=sender, references=references, cc=cc, to=toaddr)
def send_system_mail_to_user(user_or_emailaddr, subject, text):
'''
Sends a standard email from the Allura system itself, to a user.
This is a helper function around sendsimplemail() that generates a new task
:param user_or_emailaddr: an email address (str) or a User object
:param subject: subject of the email
:param text: text of the email (markdown)
'''
if isinstance(user_or_emailaddr, basestring):
toaddr = user_or_emailaddr
else:
toaddr = user_or_emailaddr._id
email = {
'toaddr': toaddr,
'fromaddr': u'"{}" <{}>'.format(
config['site_name'],
config['forgemail.return_path']
),
'sender': unicode(config['forgemail.return_path']),
'reply_to': unicode(config['forgemail.return_path']),
'message_id': h.gen_message_id(),
'subject': subject,
'text': text,
}
sendsimplemail.post(**email)
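# Usage sketch (illustrative values only):
#   send_system_mail_to_user('someone@example.com', 'Password reset',
#                            'Click the link below to reset your password.')
#   # a User object may be passed instead of an email address string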
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on May 17, 2017
@author: alfoa, wangc
Module where the base class and the specialization of different type of Model are
"""
#External Modules------------------------------------------------------------------------------------
import copy
import itertools
import numpy as np
import os
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .Dummy import Dummy
import Decorators
from SupervisedLearning import factory
from utils import utils, xmlUtils, mathUtils
from utils import InputData, InputTypes
from Decorators.Parallelization import Parallel
#Internal Modules End--------------------------------------------------------------------------------
# set environment variable to avoid parallelism degradation in some surrogate models
os.environ["MKL_NUM_THREADS"]="1"
class ROM(Dummy):
"""
ROM stands for Reduced Order Model. All the models here first learn, then predict the outcome
"""
interfaceFactory = factory
segmentNameToClass = {'segment': 'Segments',
'cluster': 'Clusters',
'interpolate': 'Interpolated'}
@classmethod
def getInputSpecification(cls, xml=None):
"""
Method to get a reference to a class that specifies the input data for
class cls. This one seems a bit excessive, are all of these for this class?
@ In, cls, the class for which we are retrieving the specification
@ In, xml, xml.etree.ElementTree.Element, optional, if given then only get specs for
corresponding subType requested by the node
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
inputSpecification = super().getInputSpecification()
inputSpecification.description = r"""A Reduced Order Model (ROM) is a mathematical model consisting of a fast
solution trained to predict a response of interest of a physical system.
The ``training'' process is performed by sampling the response of a physical
model with respect to variations of its parameters subject, for example, to
probabilistic behavior.
The results (outcomes of the physical model) of the sampling are fed into the
algorithm representing the ROM that tunes itself to replicate those results.
RAVEN supports several different types of ROMs, both internally developed and
imported through an external library called ``scikit-learn''~\cite{SciKitLearn}.
Currently in RAVEN, the user can use the \xmlAttr{subType} to select the ROM.
"""
inputSpecification.addParam('subType', required=True, param_type=InputTypes.StringType,
descr=r"""specify the type of ROM that will be used""")
######################
# dynamically loaded #
######################
# assert xml is not None
if xml is not None:
subType = xml.attrib.get('subType')
validClass = cls.interfaceFactory.returnClass(subType)
validSpec = validClass.getInputSpecification()
inputSpecification.mergeSub(validSpec)
## Add segment input specifications
segment = xml.find('Segment')
if segment is not None:
segType = segment.attrib.get('grouping', 'segment')
validClass = cls.interfaceFactory.returnClass(cls.segmentNameToClass[segType])
validSpec = validClass.getInputSpecification()
inputSpecification.mergeSub(validSpec)
return inputSpecification
@classmethod
def specializeValidateDict(cls):
"""
This method describes the types of input accepted with a certain role by the model class specialization
@ In, None
@ Out, None
"""
cls.validateDict['Input' ] = [cls.validateDict['Input' ][0]]
cls.validateDict['Input' ][0]['required' ] = True
cls.validateDict['Input' ][0]['multiplicity'] = 1
cls.validateDict['Output'][0]['type' ] = ['PointSet', 'HistorySet', 'DataSet']
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
self.amITrained = False # boolean flag, is the ROM trained?
self.supervisedEngine = None # dict of ROM instances (== number of targets => keys are the targets)
self.printTag = 'ROM MODEL' # label
self.cvInstanceName = None # the name of Cross Validation instance
self.cvInstance = None # Instance of provided cross validation
self._estimatorNameList = [] # the name list of estimator instance
self._estimatorList = [] # List of instances of provided estimators (ROM)
self._interfaceROM = None # Instance of provided ROM
self.pickled = False # True if ROM comes from a pickled rom
self.pivotParameterId = 'time' # The name of pivot parameter
self.canHandleDynamicData = False # check if the model can autonomously handle the time-dependency
# if not and time-dep data are passed in, a list of ROMs are constructed
self.isADynamicModel = False # True if the ROM is time-dependent
self.supervisedContainer = [] # List ROM instances
self.historySteps = [] # The history steps of pivot parameter
self.segment = False # True if segmenting/clustring/interpolating is requested
self.numThreads = 1 # number of threads used by the ROM
self.seed = None # seed information
self._segmentROM = None # segment rom instance
self._paramInput = None # the parsed xml input
# for Clustered ROM
self.addAssemblerObject('Classifier', InputData.Quantity.zero_to_one)
self.addAssemblerObject('Metric', InputData.Quantity.zero_to_infinity)
self.addAssemblerObject('CV', InputData.Quantity.zero_to_one)
self.addAssemblerObject('estimator', InputData.Quantity.zero_to_infinity)
def __getstate__(self):
"""
Method for choosing what gets serialized in this class
@ In, None
@ Out, d, dict, things to serialize
"""
d = copy.copy(self.__dict__)
if not self.amITrained:
supervisedEngineObj = d.pop("supervisedContainer")
del supervisedEngineObj
# NOTE assemblerDict isn't needed if ROM already trained, but it can create an infinite recursion
## for the ROMCollection if left in, so remove it on getstate.
del d['assemblerDict']
return d
def __setstate__(self, d):
"""
Method for unserializing.
@ In, d, dict, things to unserialize
@ Out, None
"""
# default setstate behavior
self.__dict__.update(d)
if not d['amITrained']:
# NOTE this will fail if the ROM requires the paramInput spec! Fortunately, you shouldn't pickle untrained.
modelInstance = self.interfaceFactory.returnInstance(self.subType)
self.supervisedContainer = [modelInstance]
# since we pop this out during saving state, initialize it here
self.assemblerDict = {}
def applyRunInfo(self, runInfo):
"""
Take information from the RunInfo
@ In, runInfo, dict, RunInfo info
@ Out, None
"""
self.numThreads = runInfo.get('NumThreads', 1)
def _readMoreXML(self,xmlNode):
"""
Function to read the portion of the xml input that belongs to this specialized class
and initialize some stuff based on the inputs got
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ Out, None
"""
super()._readMoreXML(xmlNode)
paramInput = self.getInputSpecification(xml=xmlNode)()
paramInput.parseNode(xmlNode)
self._paramInput = paramInput
cvNode = paramInput.findFirst('CV')
if cvNode is not None:
self.cvInstanceName = cvNode.value
estimatorNodeList = paramInput.findAll('estimator')
self._estimatorNameList = [estimatorNode.value for estimatorNode in estimatorNodeList] if len(estimatorNodeList) > 0 else []
self._interfaceROM = self.interfaceFactory.returnInstance(self.subType)
segmentNode = paramInput.findFirst('Segment')
## remove Segment node before passing input xml to SupervisedLearning ROM
if segmentNode is not None:
self.segment = True
# determine type of segment to load -> limited by InputData to specific options
segType = segmentNode.parameterValues.get('grouping', 'segment')
self._segmentROM = self.interfaceFactory.returnInstance(self.segmentNameToClass[segType])
segment = xmlNode.find('Segment')
romXml = copy.deepcopy(xmlNode)
romXml.remove(segment)
else:
romXml = xmlNode
self._interfaceROM._readMoreXML(romXml)
if self.segment:
romInfo = {'name':self.name, 'modelInstance': self._interfaceROM}
self._segmentROM.setTemplateROM(romInfo)
self._segmentROM._handleInput(paramInput)
self.supervisedContainer = [self._segmentROM]
else:
self.supervisedContainer = [self._interfaceROM]
# if working with a pickled ROM, send along that information
if self.subType == 'pickledROM':
self.pickled = True
pivot = paramInput.findFirst('pivotParameter')
if pivot is not None:
self.pivotParameterId = pivot.value
self.canHandleDynamicData = self._interfaceROM.isDynamic()
def initialize(self,runInfo,inputs,initDict=None):
"""
Method to initialize this class
@ In, runInfo, dict, it is the run info from the jobHandler
@ In, inputs, list, it is a list containing whatever is passed with an input role in the step
@ In, initDict, dict, optional, dictionary of all objects available in the step is using this model
"""
# retrieve cross validation object
if self.cvInstance is None and self.cvInstanceName is not None:
self.cvInstance = self.retrieveObjectFromAssemblerDict('CV', self.cvInstanceName)
self.cvInstance.initialize(runInfo, inputs, initDict)
# only initialize once
if len(self._estimatorList) == 0 and len(self._estimatorNameList) > 0:
self._estimatorList = [self.retrieveObjectFromAssemblerDict('estimator', estimatorName) for estimatorName in self._estimatorNameList]
self._interfaceROM.setEstimator(self._estimatorList)
def reset(self):
"""
Reset the ROM
@ In, None
@ Out, None
"""
for rom in self.supervisedContainer:
rom.reset()
self.amITrained = False
def reseed(self,seed):
"""
Used to reset the seed of the underlying ROM.
@ In, seed, int, new seed to use
@ Out, None
"""
for rom in self.supervisedContainer:
rom.reseed(seed)
def getInitParams(self):
"""
This function is called from the base class to print some of the information inside the class.
Whatever is permanent in the class and not inherited from the parent class should be mentioned here
The information is passed back in the dictionary. No information about values that change during the simulation are allowed
@ In, None
@ Out, paramDict, dict, dictionary containing the parameter names as keys
and each parameter's initial value as the dictionary values
"""
paramDict = self.supervisedContainer[-1].returnInitialParameters()
return paramDict
def provideExpectedMetaKeys(self):
"""
Overrides the base class method to assure child engine is also polled for its keys.
@ In, None
@ Out, metaKeys, set(str), names of meta variables being provided
@ Out, metaParams, dict, the independent indexes related to expected keys
"""
# load own keys and params
metaKeys, metaParams = Dummy.provideExpectedMetaKeys(self)
# add from specific rom
keys, params = self.supervisedContainer[-1].provideExpectedMetaKeys()
metaKeys = metaKeys.union(keys)
metaParams.update(params)
return metaKeys, metaParams
def _copyModel(self, obj):
"""
Set this instance to be a copy of the provided object.
This is used to replace placeholder models with serialized objects
during deserialization in IOStep.
Also train this model.
@ In, obj, instance, the instance of the object to copy from
@ Out, None
"""
# save reseeding parameters from pickledROM
loadSettings = {'seed': self.seed, 'paramInput': self._paramInput}
# train the ROM from the unpickled object
self.train(obj)
self.setAdditionalParams(loadSettings)
self.pickled = False
def train(self,trainingSet):
"""
This function train the ROM
@ In, trainingSet, dict or PointSet or HistorySet, data used to train the ROM; if a HistorySet is provided, a list of ROMs is created in order to build a temporal ROM
@ Out, None
"""
if type(trainingSet).__name__ == 'ROM':
self.trainingSet = copy.copy(trainingSet.trainingSet)
self.amITrained = copy.deepcopy(trainingSet.amITrained)
self.supervisedContainer = copy.deepcopy(trainingSet.supervisedContainer)
self.seed = trainingSet.seed
else:
# TODO: The following check may need to be moved to Dummy Class -- wangc 7/30/2018
if type(trainingSet).__name__ != 'dict' and trainingSet.type == 'HistorySet':
if not trainingSet.checkIndexAlignment(indexesToCheck=self.pivotParameterId):
self.raiseAnError(IOError, "The data provided by the data object", trainingSet.name, "is not synchonized!",
"The time-dependent ROM requires all the histories are synchonized!")
self.trainingSet = copy.copy(self._inputToInternal(trainingSet))
self._replaceVariablesNamesWithAliasSystem(self.trainingSet, 'inout', False)
self.supervisedContainer[0].setAssembledObjects(self.assemblerDict)
# if training using ROMCollection, special treatment
if self.segment:
self.supervisedContainer[0].train(self.trainingSet)
else:
# not a collection # TODO move time-dependent snapshots to collection!
## time-dependent or static ROM?
if any(type(x).__name__ == 'list' for x in self.trainingSet.values()):
# we need to build a "time-dependent" ROM
self.isADynamicModel = True
if self.pivotParameterId not in list(self.trainingSet.keys()):
self.raiseAnError(IOError, 'The pivot parameter "{}" is not present in the training set.'.format(self.pivotParameterId),
'A time-dependent-like ROM cannot be created!')
if type(self.trainingSet[self.pivotParameterId]).__name__ != 'list':
self.raiseAnError(IOError, 'The pivot parameter "{}" is not a list.'.format(self.pivotParameterId),
" Are you sure it is part of the output space of the training set?")
self.historySteps = self.trainingSet.get(self.pivotParameterId)[-1]
if not len(self.historySteps):
self.raiseAnError(IOError, "the training set is empty!")
# intrinsically time-dependent or does the Gate need to handle it?
if self.canHandleDynamicData:
# the ROM is able to manage the time dependency on its own
self.supervisedContainer[-1].train(self.trainingSet)
else:
# TODO we can probably migrate this time-dependent handling to a type of ROMCollection!
# we need to construct a chain of ROMs
# the check on the number of time steps (consistency) is performed inside the historySnapShoots method
# get the time slices
newTrainingSet = mathUtils.historySnapShoots(self.trainingSet, len(self.historySteps))
assert type(newTrainingSet).__name__ == 'list'
# copy the original ROM
originalROM = self.supervisedContainer[0]
# start creating and training the time-dep ROMs
self.supervisedContainer = [copy.deepcopy(originalROM) for _ in range(len(self.historySteps))]
# train
for ts in range(len(self.historySteps)):
self.supervisedContainer[ts].train(newTrainingSet[ts])
# if a static ROM ...
else:
#self._replaceVariablesNamesWithAliasSystem(self.trainingSet, 'inout', False)
self.supervisedContainer[0].train(self.trainingSet)
# END if ROMCollection
self.amITrained = True
def confidence(self,request,target = None):
"""
This is to get a value that is inversely proportional to the confidence that we have
forecasting the target value for the given set of features. The reason to choose the inverse is that,
in the case of a normal distance, this would be 1/distance, which could be infinity
@ In, request, datatype, feature coordinates (request)
@ Out, confidenceDict, dict, the dict containing the confidence on each target ({'target1':np.array(size 1 or n_ts),'target2':np.array(...)}
"""
request = self._inputToInternal(request)
if not self.amITrained:
self.raiseAnError(RuntimeError, "ROM "+self.name+" has not been trained yet and, consequentially, can not be evaluated!")
confidenceDict = {}
for rom in self.supervisedContainer:
sliceEvaluation = rom.confidence(request)
if len(list(confidenceDict.keys())) == 0:
confidenceDict.update(sliceEvaluation)
else:
for key in confidenceDict.keys():
confidenceDict[key] = np.append(confidenceDict[key],sliceEvaluation[key])
return confidenceDict
@Decorators.timingProfile
def evaluate(self, request):
"""
When the ROM is used directly, without the sampler passing in new values, evaluate should be used instead of run
@ In, request, datatype, feature coordinates (request)
@ Out, resultsDict, dict, the dict containing the outputs for each target ({'target1':np.array(size 1 or n_ts),'target2':np.array(...)}
"""
request = self._inputToInternal(request)
if self.pickled:
self.raiseAnError(RuntimeError,'ROM "', self.name, '" has not been loaded yet! Use an IOStep to load it.')
if not self.amITrained:
self.raiseAnError(RuntimeError, "ROM ", self.name, " has not been trained yet and, consequentially, can not be evaluated!")
resultsDict = {}
if self.segment:
resultsDict = self.supervisedContainer[0].run(request)
else:
for rom in self.supervisedContainer:
sliceEvaluation = rom.run(request)
if len(list(resultsDict.keys())) == 0:
resultsDict.update(sliceEvaluation)
else:
for key in resultsDict.keys():
resultsDict[key] = np.append(resultsDict[key],sliceEvaluation[key])
# assure numpy array formatting # TODO can this be done in the supervised engine instead?
for k,v in resultsDict.items():
resultsDict[k] = np.atleast_1d(v)
return resultsDict
def _externalRun(self,inRun):
"""
Method that performs the actual run of the imported external model (separated from run method for parallelization purposes)
@ In, inRun, datatype, feature coordinates
@ Out, returnDict, dict, the return dictionary containing the results
"""
returnDict = self.evaluate(inRun)
self._replaceVariablesNamesWithAliasSystem(returnDict, 'output', True)
self._replaceVariablesNamesWithAliasSystem(inRun, 'input', True)
return returnDict
@Parallel()
def evaluateSample(self, myInput, samplerType, kwargs):
"""
This will evaluate an individual sample on this model. Note, parameters
are needed by createNewInput and thus descriptions are copied from there.
@ In, myInput, list, the inputs (list) to start from to generate the new one
@ In, samplerType, string, is the type of sampler that is calling to generate a new input
@ In, kwargs, dict, is a dictionary that contains the information coming from the sampler,
        a mandatory key is 'SampledVars' that contains a dictionary {'variable name':value}
@ Out, rlz, dict, This will hold two pieces of information,
the first will be the input data used to generate this sample,
the second will be the output of this model given the specified
inputs
"""
Input = self.createNewInput(myInput, samplerType, **kwargs)
inRun = self._manipulateInput(Input[0])
# collect results from model run
result = self._externalRun(inRun)
# build realization
# assure rlz has all metadata
self._replaceVariablesNamesWithAliasSystem(kwargs['SampledVars'] ,'input',True)
rlz = dict((var,np.atleast_1d(kwargs[var])) for var in kwargs.keys())
# update rlz with input space from inRun and output space from result
rlz.update(dict((var,np.atleast_1d(inRun[var] if var in kwargs['SampledVars'] else result[var])) for var in set(itertools.chain(result.keys(),inRun.keys()))))
return rlz
def setAdditionalParams(self, params):
"""
Used to set parameters at a time other than initialization (such as deserializing).
@ In, params, dict, new params to set (internals depend on ROM)
@ Out, None
"""
for rom in self.supervisedContainer:
rom.setAdditionalParams(params)
def convergence(self,trainingSet):
"""
This is to get the cross validation score of ROM
      @ In, trainingSet, dict, the training set used to compute the cross validation score
@ Out, cvScore, dict, the dict containing the score of cross validation
"""
cvScore = self._crossValidationScore(trainingSet)
return cvScore
def _crossValidationScore(self, trainingSet):
"""
The function calculates the cross validation score on ROMs
      @ In, trainingSet, dict, the training set used to compute the cross validation score
@ Out, cvMetrics, dict, the calculated cross validation metrics
"""
if len(self.supervisedContainer) > 1:
self.raiseAnError(IOError, "Cross Validation Method is not implemented for Clustered ROMs")
cvMetrics = None
if self._checkCV(len(trainingSet)):
# reset the ROM before perform cross validation
cvMetrics = {}
self.reset()
outputMetrics = self.cvInstance._pp.run([self, trainingSet])
exploredTargets = []
for cvKey, metricValues in outputMetrics.items():
info = self.cvInstance._pp._returnCharacteristicsOfCvGivenOutputName(cvKey)
if info['targetName'] in exploredTargets:
self.raiseAnError(IOError, "Multiple metrics are used in cross validation '", self.cvInstance.name, "' for ROM '", rom.name, "'!")
exploredTargets.append(info['targetName'])
cvMetrics[self.name] = (info['metricType'], metricValues)
return cvMetrics
def _checkCV(self, trainingSize):
"""
The function will check whether we can use Cross Validation or not
@ In, trainingSize, int, the size of current training size
      @ Out, useCV, bool, True if cross validation can be performed, False otherwise
"""
useCV = True
initDict = self.cvInstance._pp.initializationOptionDict
if 'SciKitLearn' in initDict.keys() and 'n_splits' in initDict['SciKitLearn'].keys():
if trainingSize < utils.intConversion(initDict['SciKitLearn']['n_splits']):
useCV = False
else:
useCV = False
return useCV
def writePointwiseData(self, writeTo):
"""
Called by the OutStreamPrint object to cause the ROM to print information about itself
@ In, writeTo, DataObject, data structure to add data to
@ Out, None
"""
# TODO handle statepoint ROMs (dynamic, but rom doesn't handle intrinsically)
## should probably let the LearningGate handle this! It knows how to stitch together pieces, sort of.
for engine in self.supervisedContainer:
engine.writePointwiseData(writeTo)
def writeXML(self, what='all'):
"""
Called by the OutStreamPrint object to cause the ROM to print itself
@ In, what, string, optional, keyword requesting what should be printed
@ Out, xml, xmlUtils.StaticXmlElement, written meta
"""
#determine dynamic or static
dynamic = self.isADynamicModel
# determine if it can handle dynamic data
handleDynamicData = self.canHandleDynamicData
# get pivot parameter
pivotParameterId = self.pivotParameterId
# find some general settings needed for either dynamic or static handling
## get all the targets the ROMs have
ROMtargets = self.supervisedContainer[0].target
## establish requested targets
targets = ROMtargets if what=='all' else what.split(',')
## establish sets of engines to work from
engines = self.supervisedContainer
# if the ROM is "dynamic" (e.g. time-dependent targets), then how we print depends
# on whether the engine is naturally dynamic or whether we need to handle that part.
if dynamic and not handleDynamicData:
# time-dependent, but we manage the output (chopped)
xml = xmlUtils.DynamicXmlElement('ROM', pivotParam = pivotParameterId)
## pre-print printing
engines[0].writeXMLPreamble(xml) #let the first engine write the preamble
for s,rom in enumerate(engines):
pivotValue = self.historySteps[s]
#for target in targets: # should be handled by SVL engine or here??
# #skip the pivot param
# if target == pivotParameterId:
# continue
#otherwise, call engine's print method
self.raiseAMessage('Printing time-like',pivotValue,'ROM XML')
subXML = xmlUtils.StaticXmlElement(self.supervisedContainer[0].printTag)
rom.writeXML(subXML, skip = [pivotParameterId])
for element in subXML.getRoot():
xml.addScalarNode(element, pivotValue)
#xml.addScalarNode(subXML.getRoot(), pivotValue)
else:
# directly accept the results from the engine
xml = xmlUtils.StaticXmlElement(self.name)
## pre-print printing
engines[0].writeXMLPreamble(xml)
engines[0].writeXML(xml)
return xml
| |
from collections import defaultdict
from json import JSONEncoder
import hashlib
class Node(object):
APPENDIX = u'appendix'
INTERP = u'interp'
REGTEXT = u'regtext'
SUBPART = u'subpart'
EMPTYPART = u'emptypart'
INTERP_MARK = 'Interp'
def __init__(self, text='', children=[], label=[], title=None,
node_type=REGTEXT, source_xml=None):
self.text = unicode(text)
# defensive copy
self.children = list(children)
self.label = [str(l) for l in label if l != '']
title = unicode(title or '')
self.title = title or None
self.node_type = node_type
self.source_xml = source_xml
def __repr__(self):
return (("Node( text = %s, children = %s, label = %s, title = %s, "
+ "node_type = %s)") % (repr(self.text), repr(self.children),
repr(self.label), repr(self.title), repr(self.node_type)))
def __cmp__(self, other):
return cmp(repr(self), repr(other))
def label_id(self):
return '-'.join(self.label)
class NodeEncoder(JSONEncoder):
"""Custom JSON encoder to handle Node objects"""
def default(self, obj):
if isinstance(obj, Node):
fields = dict(obj.__dict__)
if obj.title is None:
del fields['title']
for field in ('tagged_text', 'source_xml', 'child_labels'):
if field in fields:
del fields[field]
return fields
return super(NodeEncoder, self).default(obj)
def node_decode_hook(d):
"""Convert a JSON object into a Node"""
if set(
('text', 'children',
'label', 'node_type')) - set(d.keys()) == set():
return Node(
d['text'], d['children'], d['label'],
d.get('title', None), d['node_type'])
else:
return d
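# Illustrative usage sketch (not part of the original module): round-trips a
# small, made-up tree through JSON using NodeEncoder and node_decode_hook.
def _example_node_json_roundtrip():
    import json
    tree = Node(text='root', label=['1000'],
                children=[Node(text='child', label=['1000', 'a'])])
    encoded = json.dumps(tree, cls=NodeEncoder)
    decoded = json.loads(encoded, object_hook=node_decode_hook)
    # the decoded object is a Node again, with the same label
    return decoded.label_id() == tree.label_id()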
def walk(node, fn):
"""Perform fn for every node in the tree. Pre-order traversal. fn must
be a function that accepts a root node."""
result = fn(node)
if result is not None:
results = [result]
else:
results = []
for child in node.children:
results += walk(child, fn)
return results
def find(root, label):
"""Search through the tree to find the node with this label."""
def check(node):
if node.label_id() == label:
return node
response = walk(root, check)
if response:
return response[0]
def join_text(node):
"""Join the text of this node and all children"""
bits = []
walk(node, lambda n: bits.append(n.text))
return ''.join(bits)
def merge_duplicates(nodes):
"""Given a list of nodes with the same-length label, merge any
duplicates (by combining their children)"""
found_pair = None
for lidx, lhs in enumerate(nodes):
for ridx, rhs in enumerate(nodes[lidx + 1:], lidx + 1):
if lhs.label == rhs.label:
found_pair = (lidx, ridx)
if found_pair:
lidx, ridx = found_pair
lhs, rhs = nodes[lidx], nodes[ridx]
lhs.children.extend(rhs.children)
return merge_duplicates(nodes[:ridx] + nodes[ridx + 1:])
else:
return nodes
def treeify(nodes):
"""Given a list of nodes, convert those nodes into the appropriate tree
structure based on their labels. This assumes that all nodes will fall
under a set of 'root' nodes, which have the min-length label."""
if not nodes:
return nodes
min_len, with_min = len(nodes[0].label), []
for node in nodes:
if len(node.label) == min_len:
with_min.append(node)
elif len(node.label) < min_len:
min_len = len(node.label)
with_min = [node]
with_min = merge_duplicates(with_min)
roots = []
for root in with_min:
if root.label[-1] == Node.INTERP_MARK:
is_child = lambda n: n.label[:len(root.label)-1] == root.label[:-1]
else:
is_child = lambda n: n.label[:len(root.label)] == root.label
children = [n for n in nodes if n.label != root.label and is_child(n)]
root.children = root.children + treeify(children)
roots.append(root)
return roots
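# Illustrative sketch (hypothetical labels): three flat nodes where '1000' has
# the shortest label become one root with the two longer-labelled children
# nested beneath it.
def _example_treeify():
    flat = [Node(label=['1000']),
            Node(label=['1000', 'a']),
            Node(label=['1000', 'b'])]
    roots = treeify(flat)
    return len(roots) == 1 and len(roots[0].children) == 2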
class FrozenNode(object):
"""Immutable interface for nodes. No guarantees about internal state."""
_pool = defaultdict(set) # collection of all FrozenNodes, keyed by hash
def __init__(self, text='', children=(), label=(), title='',
node_type=Node.REGTEXT, tagged_text=''):
self._text = text or ''
self._children = tuple(children)
self._label = tuple(label)
self._title = title or ''
self._node_type = node_type
self._tagged_text = tagged_text or ''
self._hash = self._generate_hash()
FrozenNode._pool[self.hash].add(self)
@property
def text(self):
return self._text
@property
def children(self):
return self._children
@property
def label(self):
return self._label
@property
def title(self):
return self._title
@property
def node_type(self):
return self._node_type
@property
def tagged_text(self):
return self._tagged_text
@property
def hash(self):
return self._hash
def _generate_hash(self):
"""Called during instantiation. Digests all fields"""
hasher = hashlib.sha256()
hasher.update(self.text.encode('utf-8'))
hasher.update(self.tagged_text.encode('utf-8'))
hasher.update(self.title.encode('utf-8'))
hasher.update(self.label_id.encode('utf-8'))
hasher.update(self.node_type)
for child in self.children:
hasher.update(child.hash)
return hasher.hexdigest()
def __hash__(self):
"""As the hash property is already distinctive, re-use it"""
return hash(self.hash)
def __eq__(self, other):
"""We define equality as having the same fields except for children.
Instead of recursively inspecting them, we compare only their hash
(this is a Merkle tree)"""
return (other.__class__ == self.__class__
and self.hash == other.hash
# Compare the fields to limit the effect of hash collisions
and self.text == other.text
and self.title == other.title
and self.node_type == other.node_type
and self.tagged_text == other.tagged_text
and self.label_id == other.label_id
and [c.hash for c in self.children] ==
[c.hash for c in other.children])
@staticmethod
def from_node(node):
"""Convert a struct.Node (or similar) into a struct.FrozenNode. This
also checks if this node has already been instantiated. If so, it
returns the instantiated version (i.e. only one of each identical node
exists in memory)"""
children = map(FrozenNode.from_node, node.children)
fresh = FrozenNode(text=node.text, children=children, label=node.label,
title=node.title or '', node_type=node.node_type,
tagged_text=getattr(node, 'tagged_text', '') or '')
for el in FrozenNode._pool[fresh.hash]:
if el == fresh:
return el # note we are _not_ returning fresh
@property
def label_id(self):
"""Convert label into a string"""
if not hasattr(self, '_label_id'):
self._label_id = '-'.join(self.label)
return self._label_id
| |
# Load protobufs
from POGOProtos.Networking.Requests import(
Request_pb2 as Request,
RequestType_pb2 as RequestType
)
from POGOProtos.Networking.Requests.Messages import(
EncounterMessage_pb2 as EncounterMessage,
FortSearchMessage_pb2 as FortSearchMessage,
FortDetailsMessage_pb2 as FortDetailsMessage,
CatchPokemonMessage_pb2 as CatchPokemonMessage,
GetMapObjectsMessage_pb2 as GetMapObjectsMessage,
EvolvePokemonMessage_pb2 as EvolvePokemonMessage,
ReleasePokemonMessage_pb2 as ReleasePokemonMessage,
UseItemCaptureMessage_pb2 as UseItemCaptureMessage,
UseItemEggIncubatorMessage_pb2 as UseItemEggIncubatorMessage,
RecycleInventoryItemMessage_pb2 as RecycleInventoryItemMessage,
NicknamePokemonMessage_pb2 as NicknamePokemonMessage,
UseItemPotionMessage_pb2 as UseItemPotionMessage,
UseItemReviveMessage_pb2 as UseItemReviveMessage,
SetPlayerTeamMessage_pb2 as SetPlayerTeamMessage,
SetFavoritePokemonMessage_pb2 as SetFavoritePokemonMessage,
LevelUpRewardsMessage_pb2 as LevelUpRewardsMessage,
UseItemXpBoostMessage_pb2 as UseItemXpBoostMessage,
UpgradePokemonMessage_pb2 as UpgradePokemonMessage
)
# Load Local
from pogo.inventory import items
from pogo.session_bare import PogoSessionBare
class PogoSession(PogoSessionBare):
"""Session class with more robust calls"""
# Core api calls
# Get profile
def getProfile(self):
# Create profile request
payload = [Request.Request(
request_type=RequestType.GET_PLAYER
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.profile.ParseFromString(res.returns[0])
# Return everything
return self._state.profile
# Hooks for those bundled in default
def getEggs(self):
self.getProfile()
return self._state.eggs
def getInventory(self):
self.getProfile()
return self._inventory
def getBadges(self):
self.getProfile()
return self._state.badges
def getDownloadSettings(self):
self.getProfile()
return self._state.settings
# Get Location
def getMapObjects(self, radius=10, bothDirections=True):
# Work out location details
cells = self.location.getCells(radius, bothDirections)
latitude, longitude, _ = self.getCoordinates()
timestamps = [0, ] * len(cells)
# Create request
payload = [Request.Request(
request_type=RequestType.GET_MAP_OBJECTS,
request_message=GetMapObjectsMessage.GetMapObjectsMessage(
cell_id=cells,
since_timestamp_ms=timestamps,
latitude=latitude,
longitude=longitude
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.mapObjects.ParseFromString(res.returns[0])
# Return everything
return self._state.mapObjects
# Get Location
def getFortSearch(self, fort):
# Create request
payload = [Request.Request(
request_type=RequestType.FORT_SEARCH,
request_message=FortSearchMessage.FortSearchMessage(
fort_id=fort.id,
player_latitude=self.location.latitude,
player_longitude=self.location.longitude,
fort_latitude=fort.latitude,
fort_longitude=fort.longitude
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.fortSearch.ParseFromString(res.returns[0])
# Return everything
return self._state.fortSearch
# get details about fort (image, text etc..)
def getFortDetails(self, fort):
# Create request
payload = [Request.Request(
request_type=RequestType.FORT_DETAILS,
request_message=FortDetailsMessage.FortDetailsMessage(
fort_id=fort.id,
latitude=fort.latitude,
longitude=fort.longitude,
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.fortDetails.ParseFromString(res.returns[0])
# Return everything
return self._state.fortDetails
# Get encounter (akin to tapping a pokemon)
def encounterPokemon(self, pokemon):
# Create request
payload = [Request.Request(
request_type=RequestType.ENCOUNTER,
request_message=EncounterMessage.EncounterMessage(
encounter_id=pokemon.encounter_id,
spawn_point_id=pokemon.spawn_point_id,
player_latitude=self.location.latitude,
player_longitude=self.location.longitude
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.encounter.ParseFromString(res.returns[0])
# Return everything
return self._state.encounter
# Upon Encounter, try and catch
def catchPokemon(
self, pokemon, pokeball=items.POKE_BALL,
normalized_reticle_size=1.950, hit_pokemon=True,
spin_modifier=0.850, normalized_hit_position=1.0
):
# Create request
payload = [Request.Request(
request_type=RequestType.CATCH_POKEMON,
request_message=CatchPokemonMessage.CatchPokemonMessage(
encounter_id=pokemon.encounter_id,
pokeball=pokeball,
normalized_reticle_size=normalized_reticle_size,
spawn_point_id=pokemon.spawn_point_id,
hit_pokemon=hit_pokemon,
spin_modifier=spin_modifier,
normalized_hit_position=normalized_hit_position
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.catch.ParseFromString(res.returns[0])
# Return everything
return self._state.catch
# Use a razz berry or the like
def useItemCapture(self, item_id, pokemon):
# Create request
payload = [Request.Request(
request_type=RequestType.USE_ITEM_CAPTURE,
request_message=UseItemCaptureMessage.UseItemCaptureMessage(
item_id=item_id,
encounter_id=pokemon.encounter_id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.itemCapture.ParseFromString(res.returns[0])
# Return everything
return self._state.itemCapture
# Use a Potion (Hyper potion, super, etc..)
def useItemPotion(self, item_id, pokemon):
# Create Request
payload = [Request.Request(
request_type=RequestType.USE_ITEM_POTION,
request_message=UseItemPotionMessage.UseItemPotionMessage(
item_id=item_id,
pokemon_id=pokemon.id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.itemPotion.ParseFromString(res.returns[0])
# Return everything
return self._state.itemPotion
# Use a Revive
def useItemRevive(self, item_id, pokemon):
# Create request
payload = [Request.Request(
request_type=RequestType.USE_ITEM_REVIVE,
request_message=UseItemReviveMessage.UseItemReviveMessage(
item_id=item_id,
pokemon_id=pokemon.id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.itemRevive.ParseFromString(res.returns[0])
# Return everything
return self._state.itemRevive
# Evolve Pokemon (check for candies first)
def evolvePokemon(self, pokemon):
payload = [Request.Request(
request_type=RequestType.EVOLVE_POKEMON,
request_message=EvolvePokemonMessage.EvolvePokemonMessage(
pokemon_id=pokemon.id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.evolve.ParseFromString(res.returns[0])
# Return everything
return self._state.evolve
# 'Transfers' a pokemon.
# We all secretly know Pr. Willow is probably eating them
def releasePokemon(self, pokemon):
payload = [Request.Request(
request_type=RequestType.RELEASE_POKEMON,
request_message=ReleasePokemonMessage.ReleasePokemonMessage(
pokemon_id=pokemon.id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.release.ParseFromString(res.returns[0])
# Return everything
return self._state.release
# Check for level up and apply
def getLevelUp(self, newLevel):
payload = [Request.Request(
request_type=RequestType.LEVEL_UP_REWARDS,
request_message=LevelUpRewardsMessage.LevelUpRewardsMessage(
level=newLevel
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.levelUp.ParseFromString(res.returns[0])
# Return everything
return self._state.levelUp
# Use a lucky egg
def useXpBoost(self):
payload = [Request.Request(
request_type=RequestType.USE_ITEM_XP_BOOST,
request_message=UseItemXpBoostMessage.UseItemXpBoostMessage(
item_id=items.LUCKY_EGG
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.xpBoost.ParseFromString(res.returns[0])
# Return everything
return self._state.xpBoost
# Throw away items
def recycleItem(self, item_id, count):
# Create request
payload = [Request.Request(
request_type=RequestType.RECYCLE_INVENTORY_ITEM,
request_message=RecycleInventoryItemMessage.RecycleInventoryItemMessage(
item_id=item_id,
count=count
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.recycle.ParseFromString(res.returns[0])
# Return everything
return self._state.recycle
# set an Egg into an incubator
def setEgg(self, item, pokemon):
# Create request
payload = [Request.Request(
request_type=RequestType.USE_ITEM_EGG_INCUBATOR,
request_message=UseItemEggIncubatorMessage.UseItemEggIncubatorMessage(
item_id=item.id,
pokemon_id=pokemon.id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.incubator.ParseFromString(res.returns[0])
# Return everything
return self._state.incubator
# Set the name of a given pokemon
def nicknamePokemon(self, pokemon, nickname):
# Create request
payload = [Request.Request(
request_type=RequestType.NICKNAME_POKEMON,
request_message=NicknamePokemonMessage.NicknamePokemonMessage(
pokemon_id=pokemon.id,
nickname=nickname
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload)
# Parse
self._state.nickname.ParseFromString(res.returns[0])
# Return everything
return self._state.nickname
# Set Pokemon as favorite
def setFavoritePokemon(self, pokemon, is_favorite):
# Create Request
payload = [Request.Request(
request_type=RequestType.SET_FAVORITE_POKEMON,
request_message=SetFavoritePokemonMessage.SetFavoritePokemonMessage(
pokemon_id=pokemon.id,
is_favorite=is_favorite
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.favoritePokemon.ParseFromString(res.returns[0])
# Return Everything
return self._state.favoritePokemon
# Upgrade a Pokemon's CP
def upgradePokemon(self, pokemon):
# Create request
payload = [Request.Request(
request_type=RequestType.UPGRADE_POKEMON,
request_message=UpgradePokemonMessage.UpgradePokemonMessage(
pokemon_id=pokemon.id
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.upgradePokemon.ParseFromString(res.returns[0])
# Return everything
return self._state.upgradePokemon
# Choose player's team- "BLUE","RED", or "YELLOW".
def setPlayerTeam(self, team):
# Create request
payload = [Request.Request(
request_type=RequestType.SET_PLAYER_TEAM,
request_message=SetPlayerTeamMessage.SetPlayerTeamMessage(
team=team
).SerializeToString()
)]
# Send
res = self.wrapAndRequest(payload, defaults=False)
# Parse
self._state.playerTeam.ParseFromString(res.returns[0])
# Return everything
return self._state.playerTeam
| |
# Copyright (c) 2007 by Mark Bergsma <mark@nedworks.org>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Twisted Factory, BGP implementation.
"""
import logging
import socket
import platform
import struct
import sys
import netaddr
from twisted.internet import protocol
from twisted.internet import reactor
from oslo_config import cfg
from yabgp.core.protocol import BGP
from yabgp.core.fsm import FSM
from yabgp.common import constants as bgp_cons
from yabgp.common.afn import AFNUM_INET
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class BGPFactory(protocol.Factory):
"""Base factory for creating BGP protocol instances."""
protocol = BGP
FSM = FSM
def buildProtocol(self, addr):
"""Builds a BGPProtocol instance.
        :param addr : address used for building protocol.
"""
return protocol.Factory.buildProtocol(self, addr)
def startedConnecting(self, connector):
"""Called when a connection attempt has been initiated.
:param connector : Twisted connector
"""
pass
def clientConnectionLost(self, connector, reason):
""" Called when a TCP client connection was lost.
:param connector : Twisted connector
        :param reason : connection failed reason.
"""
LOG.info("Client connection lost:%s", reason.getErrorMessage())
class BGPPeering(BGPFactory):
"""Class managing a BGP session with a peer.
One connection, One BGPPeering class.
"""
def __init__(self, myasn=None, myaddr=None, peerasn=None, peeraddr=None,
afisafi=None, md5=None, handler=None):
"""Initial a BGPPeering instance.
:param myasn: local bgp as number.
:param myaddr: local ip address.
:param peerasn: remote bgp peer as number
:param peeraddr: remote peer ip address.
        :param afisafi: afi and safi
        :param md5: TCP md5 string
        :param handler: handler used for peer event callbacks (e.g. connection failures)
"""
LOG.info('Init BGPPeering for peer %s', peeraddr)
self.my_asn = myasn
self.my_addr = myaddr
self.peer_addr = peeraddr
self.peer_id = None
self.bgp_id = None
self.peer_asn = peerasn
self.afi_safi = afisafi
self.md5 = md5
self.status = False
self.fsm = BGPFactory.FSM(self)
self.handler = handler
# reference to the BGPProtocol instance in ESTAB state
self.estab_protocol = None
def buildProtocol(self, addr):
"""Builds a BGP protocol instance
:param addr: IP address used for building protocol.
"""
LOG.info("[%s]Building a new BGP protocol instance", self.peer_addr)
p = BGPFactory.buildProtocol(self, addr)
if p is not None:
self._initProtocol(p, addr)
self.estab_protocol = p
return p
def _initProtocol(self, protocol, addr):
"""Initializes a BGPProtocol instance
:param protocol: twisted Protocol
:param addr: ip address
"""
protocol.bgp_peering = self
# Hand over the FSM
protocol.fsm = self.fsm
protocol.fsm.protocol = protocol
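        # If we connected out to the peer's well-known BGP port, this is an
        # outgoing connection and the FSM starts in CONNECT; otherwise the
        # connection is inbound and the FSM starts in ACTIVE.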
if addr.port == bgp_cons.PORT:
protocol.fsm.state = bgp_cons.ST_CONNECT
else:
protocol.fsm.state = bgp_cons.ST_ACTIVE
def clientConnectionFailed(self, connector, reason):
"""Called when the outgoing connection failed.
:param connector: Twisted connector
:param reason: connection failed reason
"""
error_msg = "[%s]Client connection failed: %s" % (self.peer_addr, reason.getErrorMessage())
self.handler.on_connection_failed(self.peer_addr, reason.getErrorMessage())
LOG.info(error_msg)
# There is no protocol instance yet at this point.
# Catch a possible NotificationException
try:
self.fsm.connection_failed()
except Exception as e:
LOG.info("[%s]Client connection failed: %s", self.peer_addr, e)
def automatic_start(self, idle_hold=False):
"""BGP AutomaticStart event (event 3)
:param idle_hold: flag represents used Idle Hold
"""
if self.fsm.state == bgp_cons.ST_IDLE:
if self.fsm.automatic_start(idle_hold):
self.status = True
# Create outbound connection as a client
self.connect()
def manual_start(self, idle_hold=False):
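        """BGP ManualStart event (event 1)
        :param idle_hold: flag represents used Idle Hold
        :return: "EST" if already established, True if the start succeeded,
            False otherwise
        """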
if self.fsm.state == bgp_cons.ST_ESTABLISHED:
return "EST"
elif self.fsm.state == bgp_cons.ST_IDLE:
if self.fsm.manual_start(idle_hold=idle_hold):
self.status = True
self.connect()
return True
else:
return False
def manual_stop(self):
"""BGP ManualStop event (event 2) Returns a DeferredList that
will fire once the connection(s) have closed"""
return self.fsm.manual_stop()
def connection_closed(self, pro, disconnect=False):
"""
Called by FSM or Protocol when the BGP connection has been closed.
:param pro: twisted protocol
:param disconnect: the status of connection
"""
LOG.info("[%s]Connection closed", self.peer_addr)
if pro is not None:
# Connection succeeded previously, protocol exists
# Remove the protocol, if it exists
if pro is self.estab_protocol:
self.estab_protocol = None
# self.fsm should still be valid and set to ST_IDLE
self.fsm.state = bgp_cons.ST_IDLE
if self.fsm.allow_automatic_start:
self.automatic_start(idle_hold=True)
def connect_retry(self):
"""Called by FSM when we should reattempt to connect.
"""
try:
self.connect()
except Exception as e:
LOG.error(e)
import traceback
LOG.debug(traceback.format_exc())
def set_peer_id(self, bgp_id):
"""
Should be called when an Open message was received from a peer.
Sets the BGP identifier of the peer if it wasn't set yet. If the
new peer id is unequal to the existing one, CEASE all connections.
:param bgp_id: BGP ID
"""
if self.peer_id is None:
self.peer_id = bgp_id
LOG.info('Set BGP peer id %s', bgp_id)
elif self.peer_id != bgp_id:
# Ouch, schizophrenia. The BGP id of the peer is unequal to
# the ids of current and/or previous sessions. Close all
# connections.
self.peer_id = None
def connect(self):
"""Initiates a TCP client connection to the peer. Should only be called from
        BGPPeering or FSM, otherwise use manual_start() instead.
"""
# DEBUG
LOG.info("(Re)connect to %s", self.peer_addr)
if self.fsm.state != bgp_cons.ST_ESTABLISHED:
connector = reactor.connectTCP(
host=self.peer_addr,
port=bgp_cons.PORT,
factory=self,
timeout=30,
bindAddress=(self.my_addr, 0))
if isinstance(self.md5, str) and self.md5:
md5sig = self.get_tcp_md5sig(self.md5, self.peer_addr, bgp_cons.PORT)
if md5sig:
connector.transport.getHandle().setsockopt(
socket.IPPROTO_TCP, bgp_cons.TCP_MD5SIG, md5sig)
else:
sys.exit()
return True
else:
return False
@staticmethod
def get_tcp_md5sig(md5_str, host, port):
"""set tcp md5
"""
os_type = platform.system()
if os_type != 'Linux':
LOG.error('YABGP has no MD5 support for %s', os_type)
return None
# address family
if netaddr.IPAddress(host).version == 4:
# ipv4 address
afi = AFNUM_INET
elif netaddr.IPAddress(host).version == 6:
# ipv6 address
LOG.error("Does not support ipv6 address family")
sys.exit()
else:
afi = None
LOG.error('Peer address is not a valid ipv4/ipv6 address')
try:
n_port = socket.htons(port)
if afi == AFNUM_INET:
n_addr = socket.inet_pton(socket.AF_INET, host)
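                # Layout assumed by the format string below (it mirrors the
                # kernel's struct tcp_md5sig): address family (H), port (H),
                # 4-byte IPv4 address (4s), SS_PADSIZE_IPV4 padding bytes,
                # 2 pad bytes, key length (H), 4 pad bytes, then the key.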
tcp_md5sig = 'HH4s%dx2xH4x%ds' % (
bgp_cons.SS_PADSIZE_IPV4, bgp_cons.TCP_MD5SIG_MAXKEYLEN)
md5sig = struct.pack(
tcp_md5sig, socket.AF_INET, n_port, n_addr, len(md5_str), md5_str.encode())
return md5sig
else:
return None
except socket.error as e:
LOG.error('This linux machine does not support TCP_MD5SIG: (%s)', str(e))
return None
| |
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* If CACHE_MIDDLEWARE_ANONYMOUS_ONLY is set to True, only anonymous requests
(i.e., those not made by a logged-in user) will be cached. This is a simple
and effective way of avoiding the caching of the Django admin (and any other
user-specific content).
* This middleware expects that a HEAD request is answered with exactly the same
  response headers as the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import get_cache, DEFAULT_CACHE_ALIAS
from django.utils.cache import get_cache_key, learn_cache_key, patch_response_headers, get_max_age
class UpdateCacheMiddleware(object):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
"""
def __init__(self):
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = get_cache(self.cache_alias)
def _session_accessed(self, request):
try:
return request.session.accessed
except AttributeError:
return False
def _should_update_cache(self, request, response):
if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
return False
# If the session has not been accessed otherwise, we don't want to
# cause it to be accessed here. If it hasn't been accessed, then the
# user's logged-in status has not affected the response anyway.
if self.cache_anonymous_only and self._session_accessed(request):
assert hasattr(request, 'user'), "The Django cache middleware with CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True requires authentication middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.auth.middleware.AuthenticationMiddleware' before the CacheMiddleware."
if request.user.is_authenticated():
# Don't cache user-variable requests from authenticated users.
return False
return True
def process_response(self, request, response):
"""Sets the cache, if needed."""
if not self._should_update_cache(request, response):
# We don't need to update the cache, just return.
return response
if response.streaming or response.status_code != 200:
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
        if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
if timeout:
cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(object):
"""
Request-phase cache middleware that fetches a page from the cache.
Must be used as part of the two-part update/fetch cache middleware.
FetchFromCacheMiddleware must be the last piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the request phase.
"""
def __init__(self):
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = get_cache(self.cache_alias)
def process_request(self, request):
"""
Checks whether the page is already cached and returns the cached
version if available.
"""
        if request.method not in ('GET', 'HEAD'):
request._cache_update_cache = False
return None # Don't bother checking the cache.
if self.cache_anonymous_only and request.user.is_authenticated():
request._cache_update_cache = False
return None # Do not serve from cache for authenticated users.
# Do not use cache for SSL.
if request.is_secure():
request._cache_update_cache = False
return None
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key, None)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == 'HEAD':
cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
response = self.cache.get(cache_key, None)
if response is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
# hit, return cached response
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
"""
Cache middleware that provides basic behavior for many simple sites.
Also used as the hook point for the cache decorator, which is generated
using the decorator-from-middleware utility.
"""
def __init__(self, cache_timeout=None, cache_anonymous_only=None, **kwargs):
# We need to differentiate between "provided, but using default value",
# and "not provided". If the value is provided using a default, then
# we fall back to system defaults. If it is not provided at all,
# we need to use middleware defaults.
cache_kwargs = {}
try:
self.key_prefix = kwargs['key_prefix']
if self.key_prefix is not None:
cache_kwargs['KEY_PREFIX'] = self.key_prefix
else:
self.key_prefix = ''
except KeyError:
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_kwargs['KEY_PREFIX'] = self.key_prefix
try:
self.cache_alias = kwargs['cache_alias']
if self.cache_alias is None:
self.cache_alias = DEFAULT_CACHE_ALIAS
if cache_timeout is not None:
cache_kwargs['TIMEOUT'] = cache_timeout
except KeyError:
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
if cache_timeout is None:
cache_kwargs['TIMEOUT'] = settings.CACHE_MIDDLEWARE_SECONDS
else:
cache_kwargs['TIMEOUT'] = cache_timeout
if cache_anonymous_only is None:
self.cache_anonymous_only = getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
else:
self.cache_anonymous_only = cache_anonymous_only
self.cache = get_cache(self.cache_alias, **cache_kwargs)
self.cache_timeout = self.cache.default_timeout
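# Usage note: as mentioned in the class docstring, Django's cache_page view
# decorator is generated from CacheMiddleware with the decorator-from-middleware
# utility, roughly:
#
#   from django.utils.decorators import decorator_from_middleware_with_args
#   cache_page = decorator_from_middleware_with_args(CacheMiddleware)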
| |
# Natural Language Toolkit: Expectation Maximization Clusterer
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Trevor Cohn <tacohn@cs.mu.oz.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
try:
import numpy
except ImportError:
pass
from nltk.compat import python_2_unicode_compatible
from .util import VectorSpaceClusterer
@python_2_unicode_compatible
class EMClusterer(VectorSpaceClusterer):
"""
The Gaussian EM clusterer models the vectors as being produced by
a mixture of k Gaussian sources. The parameters of these sources
(prior probability, mean and covariance matrix) are then found to
maximise the likelihood of the given data. This is done with the
expectation maximisation algorithm. It starts with k arbitrarily
chosen means, priors and covariance matrices. It then calculates
the membership probabilities for each vector in each of the
clusters; this is the 'E' step. The cluster parameters are then
updated in the 'M' step using the maximum likelihood estimate from
the cluster membership probabilities. This process continues until
the likelihood of the data does not significantly increase.
"""
def __init__(self, initial_means, priors=None, covariance_matrices=None,
conv_threshold=1e-6, bias=0.1, normalise=False,
svd_dimensions=None):
"""
Creates an EM clusterer with the given starting parameters,
convergence threshold and vector mangling parameters.
:param initial_means: the means of the gaussian cluster centers
:type initial_means: [seq of] numpy array or seq of SparseArray
:param priors: the prior probability for each cluster
:type priors: numpy array or seq of float
:param covariance_matrices: the covariance matrix for each cluster
:type covariance_matrices: [seq of] numpy array
:param conv_threshold: maximum change in likelihood before deemed
convergent
:type conv_threshold: int or float
:param bias: variance bias used to ensure non-singular covariance
matrices
:type bias: float
:param normalise: should vectors be normalised to length 1
:type normalise: boolean
:param svd_dimensions: number of dimensions to use in reducing vector
dimensionsionality with SVD
:type svd_dimensions: int
"""
VectorSpaceClusterer.__init__(self, normalise, svd_dimensions)
self._means = numpy.array(initial_means, numpy.float64)
self._num_clusters = len(initial_means)
self._conv_threshold = conv_threshold
self._covariance_matrices = covariance_matrices
self._priors = priors
self._bias = bias
def num_clusters(self):
return self._num_clusters
def cluster_vectorspace(self, vectors, trace=False):
assert len(vectors) > 0
# set the parameters to initial values
dimensions = len(vectors[0])
means = self._means
priors = self._priors
if not priors:
priors = self._priors = numpy.ones(self._num_clusters,
numpy.float64) / self._num_clusters
covariances = self._covariance_matrices
if not covariances:
covariances = self._covariance_matrices = \
[ numpy.identity(dimensions, numpy.float64)
for i in range(self._num_clusters) ]
# do the E and M steps until the likelihood plateaus
lastl = self._loglikelihood(vectors, priors, means, covariances)
converged = False
while not converged:
if trace: print('iteration; loglikelihood', lastl)
# E-step, calculate hidden variables, h[i,j]
h = numpy.zeros((len(vectors), self._num_clusters),
numpy.float64)
for i in range(len(vectors)):
for j in range(self._num_clusters):
h[i,j] = priors[j] * self._gaussian(means[j],
covariances[j], vectors[i])
h[i,:] /= sum(h[i,:])
# M-step, update parameters - cvm, p, mean
for j in range(self._num_clusters):
covariance_before = covariances[j]
new_covariance = numpy.zeros((dimensions, dimensions),
numpy.float64)
new_mean = numpy.zeros(dimensions, numpy.float64)
sum_hj = 0.0
for i in range(len(vectors)):
delta = vectors[i] - means[j]
new_covariance += h[i,j] * \
numpy.multiply.outer(delta, delta)
sum_hj += h[i,j]
new_mean += h[i,j] * vectors[i]
covariances[j] = new_covariance / sum_hj
means[j] = new_mean / sum_hj
priors[j] = sum_hj / len(vectors)
# bias term to stop covariance matrix being singular
covariances[j] += self._bias * \
numpy.identity(dimensions, numpy.float64)
# calculate likelihood - FIXME: may be broken
l = self._loglikelihood(vectors, priors, means, covariances)
# check for convergence
if abs(lastl - l) < self._conv_threshold:
converged = True
lastl = l
def classify_vectorspace(self, vector):
best = None
for j in range(self._num_clusters):
p = self._priors[j] * self._gaussian(self._means[j],
self._covariance_matrices[j], vector)
if not best or p > best[0]:
best = (p, j)
return best[1]
def likelihood_vectorspace(self, vector, cluster):
cid = self.cluster_names().index(cluster)
        return self._priors[cid] * self._gaussian(self._means[cid],
                                                  self._covariance_matrices[cid], vector)
def _gaussian(self, mean, cvm, x):
m = len(mean)
assert cvm.shape == (m, m), \
'bad sized covariance matrix, %s' % str(cvm.shape)
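        # multivariate normal density computed below:
        #   N(x; mean, cvm) = (2*pi)**(-m/2) * det(cvm)**(-1/2)
        #                     * exp(-1/2 * (x - mean)' inv(cvm) (x - mean))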
try:
det = numpy.linalg.det(cvm)
inv = numpy.linalg.inv(cvm)
a = det ** -0.5 * (2 * numpy.pi) ** (-m / 2.0)
dx = x - mean
b = -0.5 * numpy.dot( numpy.dot(dx, inv), dx)
return a * numpy.exp(b)
except OverflowError:
# happens when the exponent is negative infinity - i.e. b = 0
# i.e. the inverse of cvm is huge (cvm is almost zero)
return 0
def _loglikelihood(self, vectors, priors, means, covariances):
llh = 0.0
for vector in vectors:
p = 0
for j in range(len(priors)):
p += priors[j] * \
self._gaussian(means[j], covariances[j], vector)
llh += numpy.log(p)
return llh
def __repr__(self):
return '<EMClusterer means=%s>' % list(self._means)
def demo():
"""
Non-interactive demonstration of the clusterers with simple 2-D data.
"""
from nltk import cluster
# example from figure 14.10, page 519, Manning and Schutze
vectors = [numpy.array(f) for f in [[0.5, 0.5], [1.5, 0.5], [1, 3]]]
means = [[4, 2], [4, 2.01]]
clusterer = cluster.EMClusterer(means, bias=0.1)
clusters = clusterer.cluster(vectors, True, trace=True)
print('Clustered:', vectors)
print('As: ', clusters)
print()
for c in range(2):
print('Cluster:', c)
print('Prior: ', clusterer._priors[c])
print('Mean: ', clusterer._means[c])
print('Covar: ', clusterer._covariance_matrices[c])
print()
# classify a new vector
vector = numpy.array([2, 2])
print('classify(%s):' % vector, end=' ')
print(clusterer.classify(vector))
# show the classification probabilities
vector = numpy.array([2, 2])
print('classification_probdist(%s):' % vector)
pdist = clusterer.classification_probdist(vector)
for sample in pdist.samples():
print('%s => %.0f%%' % (sample,
pdist.prob(sample) *100))
#
# The following demo code is broken.
#
# # use a set of tokens with 2D indices
# vectors = [numpy.array(f) for f in [[3, 3], [1, 2], [4, 2], [4, 0], [2, 3], [3, 1]]]
# # test the EM clusterer with means given by k-means (2) and
# # dimensionality reduction
# clusterer = cluster.KMeans(2, euclidean_distance, svd_dimensions=1)
# print 'Clusterer:', clusterer
# clusters = clusterer.cluster(vectors)
# means = clusterer.means()
# print 'Means:', clusterer.means()
# print
# clusterer = cluster.EMClusterer(means, svd_dimensions=1)
# clusters = clusterer.cluster(vectors, True)
# print 'Clusterer:', clusterer
# print 'Clustered:', str(vectors)[:60], '...'
# print 'As:', str(clusters)[:60], '...'
# print
# # classify a new vector
# vector = numpy.array([3, 3])
# print 'classify(%s):' % vector,
# print clusterer.classify(vector)
# print
# # show the classification probabilities
# vector = numpy.array([2.2, 2])
# print 'classification_probdist(%s)' % vector
# pdist = clusterer.classification_probdist(vector)
# for sample in pdist:
# print '%s => %.0f%%' % (sample, pdist.prob(sample) *100)
if __name__ == '__main__':
demo()
| |
"""
Dummy client runner
This module implements a stand-alone launcher for stress-testing
an Evennia game. It will launch any number of fake clients. These
clients will log into the server and start doing random operations.
Customizing and weighing these operations differently depends on
which type of game is tested. The module contains a testing module
for plain Evennia.
Please note that you shouldn't run this on a production server!
Launch the program without any arguments or options to see a
full step-by-step setup help.
Basically (for testing default Evennia):
- Use an empty/testing database.
- set PERMISSION_PLAYER_DEFAULT = "Builders"
- start server, eventually with profiling active
- launch this client runner
If you want to customize the runner's client actions
(because you changed the cmdset or needs to better
match your use cases or add more actions), you can
change which actions by adding a path to
DUMMYRUNNER_ACTIONS_MODULE = <path.to.your.module>
in your settings. See utils.dummyrunner_actions.py
for instructions on how to define this module.
"""
import sys
import time
import random
from argparse import ArgumentParser
from twisted.conch import telnet
from twisted.internet import reactor, protocol
from twisted.internet.task import LoopingCall
from django.conf import settings
from evennia.utils import mod_import, time_format
# Load the dummyrunner settings module
DUMMYRUNNER_SETTINGS = mod_import(settings.DUMMYRUNNER_SETTINGS_MODULE)
if not DUMMYRUNNER_SETTINGS:
raise IOError("Error: Dummyrunner could not find settings file at %s" %
settings.DUMMYRUNNER_SETTINGS_MODULE)
DATESTRING = "%Y%m%d%H%M%S"
# Settings
# number of clients to launch if no input is given on command line
NCLIENTS = 1
# time between each 'tick', in seconds, if not set on command
# line. All launched clients will be called upon to possibly do an
# action with this frequency.
TIMESTEP = DUMMYRUNNER_SETTINGS.TIMESTEP
# chance of a client performing an action, per timestep. This helps to
# spread out usage randomly, like it would be in reality.
CHANCE_OF_ACTION = DUMMYRUNNER_SETTINGS.CHANCE_OF_ACTION
# spread out the login action separately, having many players create accounts
# and connect simultaneously is generally unlikely.
CHANCE_OF_LOGIN = DUMMYRUNNER_SETTINGS.CHANCE_OF_LOGIN
# Port to use, if not specified on command line
TELNET_PORT = DUMMYRUNNER_SETTINGS.TELNET_PORT or settings.TELNET_PORTS[0]
#
NLOGGED_IN = 0
# Messages
INFO_STARTING = \
"""
Dummyrunner starting using {N} dummy player(s). If you don't see
any connection messages, make sure that the Evennia server is
running.
Use Ctrl-C to stop/disconnect clients.
"""
ERROR_NO_MIXIN = \
"""
Error: Evennia is not set up for dummyrunner. Before starting the
server, make sure to include the following at *the end* of your
settings file (remove when not using dummyrunner!):
from evennia.server.profiling.settings_mixin import *
This will change the settings in the following way:
- change PERMISSION_PLAYER_DEFAULT to 'Immortals' to allow clients
to test all commands
- change PASSWORD_HASHERS to use a faster (but less safe) algorithm
when creating large numbers of accounts at the same time
If you don't want to use the custom settings of the mixin for some
reason, you can change their values manually after the import, or
add DUMMYRUNNER_MIXIN=True to your settings file to avoid this
error completely.
Warning: Don't run dummyrunner on a production database! It will
create a lot of spammy objects and player accounts!
"""
ERROR_FEW_ACTIONS = \
"""
Dummyrunner settings error: The ACTIONS tuple is too short: it must
contain at least login- and logout functions.
"""
HELPTEXT = """
DO NOT RUN THIS ON A PRODUCTION SERVER! USE A CLEAN/TESTING DATABASE!
This stand-alone program launches dummy telnet clients against a
running Evennia server. The idea is to mimic real players logging in
and repeatedly doing resource-heavy commands so as to stress test the
game. It uses the default command set to log in and issue commands, so
if that was customized, some of the functionality will not be tested
(it will not fail, the commands will just not be recognized). The
running clients will create new objects and rooms all over the place
as part of their running, so using a clean/testing database is
strongly recommended.
Setup:
1) setup a fresh/clean database (if using sqlite, just safe-copy
away your real evennia.db3 file and create a new one with
manage.py)
2) in server/conf/settings.py, add
PERMISSION_PLAYER_DEFAULT="Builders"
This is so that the dummy players can test building operations.
You can also customize the dummyrunner by modifying a setting
file specified by DUMMYRUNNER_SETTINGS_MODULE
3) Start Evennia like normal, optionally with profiling (--profile)
4) Run this dummy runner via the evennia launcher:
evennia --dummyrunner <nr_of_clients>
5) Log on and determine if game remains responsive despite the
heavier load. Note that if you do profiling, there is an
    additional overhead from the profiler too!
6) If you use profiling, let the game run long enough to gather
    data, then stop the server cleanly using evennia stop or @shutdown.
    The profile appears as
server/logs/server.prof/portal.prof (see Python's manual on
cProfiler).
"""
#------------------------------------------------------------
# Helper functions
#------------------------------------------------------------
ICOUNT = 0
def idcounter():
"""
Makes unique ids.
Returns:
        count (str): A globally unique counter (as a string).
"""
global ICOUNT
ICOUNT += 1
return str(ICOUNT)
GCOUNT = 0
def gidcounter():
"""
Makes globally unique ids.
Returns:
        count (str): A globally unique counter.
"""
global GCOUNT
GCOUNT += 1
return "%s-%s" % (time.strftime(DATESTRING), GCOUNT)
def makeiter(obj):
"""
Makes everything iterable.
Args:
obj (any): Object to turn iterable.
Returns:
iterable (iterable): An iterable object.
"""
return obj if hasattr(obj, '__iter__') else [obj]
#------------------------------------------------------------
# Client classes
#------------------------------------------------------------
class DummyClient(telnet.StatefulTelnetProtocol):
"""
Handles connection to a running Evennia server,
mimicking a real player by sending commands on
a timer.
"""
def connectionMade(self):
"""
Called when connection is first established.
"""
# public properties
self.cid = idcounter()
self.key = "Dummy-%s" % self.cid
self.gid = "%s-%s" % (time.strftime(DATESTRING), self.cid)
self.istep = 0
self.exits = [] # exit names created
self.objs = [] # obj names created
self._connected = False
self._loggedin = False
self._logging_out = False
self._report = ""
self._cmdlist = [] # already stepping in a cmd definition
self._login = self.factory.actions[0]
self._logout = self.factory.actions[1]
self._actions = self.factory.actions[2:]
reactor.addSystemEventTrigger('before', 'shutdown', self.logout)
def dataReceived(self, data):
"""
Called when data comes in over the protocol. We wait to start
stepping until the server actually responds
Args:
data (str): Incoming data.
"""
if not self._connected and not data.startswith(chr(255)):
# wait until we actually get text back (not just telnet
# negotiation)
self._connected = True
# start client tick
d = LoopingCall(self.step)
            # jitter the step interval by up to +/- 0.5 second
timestep = TIMESTEP + (-0.5 + (random.random()*1.0))
d.start(timestep, now=True).addErrback(self.error)
def connectionLost(self, reason):
"""
        Called when losing the connection.
        Args:
            reason (str): Reason for losing connection.
"""
if not self._logging_out:
print "client %s(%s) lost connection (%s)" % (self.key, self.cid, reason)
def error(self, err):
"""
Error callback.
Args:
err (Failure): Error instance.
"""
print err
def counter(self):
"""
Produces a unique id, also between clients.
Returns:
counter (int): A unique counter.
"""
return gidcounter()
def logout(self):
"""
Causes the client to log out of the server. Triggered by ctrl-c signal.
"""
self._logging_out = True
cmd = self._logout(self)
print "client %s(%s) logout (%s actions)" % (self.key, self.cid, self.istep)
self.sendLine(cmd)
def step(self):
"""
Perform a step. This is called repeatedly by the runner and
causes the client to issue commands to the server. This holds
all "intelligence" of the dummy client.
"""
global NLOGGED_IN
rand = random.random()
if not self._cmdlist:
# no commands ready. Load some.
if not self._loggedin:
if rand < CHANCE_OF_LOGIN:
# get the login commands
self._cmdlist = list(makeiter(self._login(self)))
NLOGGED_IN += 1 # this is for book-keeping
print "connecting client %s (%i/%i)..." % (self.key, NLOGGED_IN, NCLIENTS)
self._loggedin = True
else:
# no login yet, so cmdlist not yet set
return
else:
# we always pick a cumulatively random function
crand = random.random()
cfunc = [func for (cprob, func) in self._actions if cprob >= crand][0]
self._cmdlist = list(makeiter(cfunc(self)))
# at this point we always have a list of commands
if rand < CHANCE_OF_ACTION:
# send to the game
self.sendLine(str(self._cmdlist.pop(0)))
self.istep += 1
class DummyFactory(protocol.ClientFactory):
protocol = DummyClient
def __init__(self, actions):
"Setup the factory base (shared by all clients)"
self.actions = actions
#------------------------------------------------------------
# Access method:
# Starts clients and connects them to a running server.
#------------------------------------------------------------
def start_all_dummy_clients(nclients):
"""
Initialize all clients, connect them and start to step them
Args:
nclients (int): Number of dummy clients to connect.
"""
global NCLIENTS
NCLIENTS = int(nclients)
actions = DUMMYRUNNER_SETTINGS.ACTIONS
if len(actions) < 2:
print ERROR_FEW_ACTIONS
return
# make sure the probabilities add up to 1
pratio = 1.0 / sum(tup[0] for tup in actions[2:])
flogin, flogout, probs, cfuncs = actions[0], actions[1], [tup[0] * pratio for tup in actions[2:]], [tup[1] for tup in actions[2:]]
    # create cumulative probabilities for the random actions
cprobs = [sum(v for i,v in enumerate(probs) if i<=k) for k in range(len(probs))]
# rebuild a new, optimized action structure
actions = (flogin, flogout) + tuple(zip(cprobs, cfuncs))
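    # Worked example of the structure built above (the weights are hypothetical,
    # not from any particular settings file): action weights 0.5, 0.3 and 0.2 give
    # pratio = 1.0 and cprobs = [0.5, 0.8, 1.0]. In step(), a uniform random number
    # crand then selects the first action whose cumulative probability is >= crand,
    # so each action fires with its configured relative frequency.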
# setting up all clients (they are automatically started)
factory = DummyFactory(actions)
for i in range(NCLIENTS):
reactor.connectTCP("localhost", TELNET_PORT, factory)
# start reactor
reactor.run()
#------------------------------------------------------------
# Command line interface
#------------------------------------------------------------
if __name__ == '__main__':
try:
settings.DUMMYRUNNER_MIXIN
except AttributeError:
print ERROR_NO_MIXIN
sys.exit()
# parsing command line with default vals
parser = ArgumentParser(description=HELPTEXT)
parser.add_argument("-N", nargs=1, default=1, dest="nclients",
help="Number of clients to start")
args = parser.parse_args()
print INFO_STARTING.format(N=args.nclients[0])
# run the dummyrunner
t0 = time.time()
start_all_dummy_clients(nclients=args.nclients[0])
ttot = time.time() - t0
# output runtime
print "... dummy client runner stopped after %s." % time_format(ttot, style=3)
| |
# -*- coding: utf-8 -*-
'''
Created on Jan 04, 2013
@author: Mourad Mourafiq
About: This is an attempt to solve the Quora Typeahead challenge.
'''
import re
import copy
import datetime
COMMANDS = "(ADD)|(DEL)|(W?QUERY)"
ANY_STRING = "(\\S*.*)"
SEPARATORS = "(?: |\\t)"
IDS = "\\w+"
TYPES = "user|topic|question|board"
FLOATS = "[0-9]+(?:\\.[0-9]*)?"  # escape the dot so it only matches a decimal point
INTS = "[0-9]+"
BOOSTS = "((?:" + TYPES + "|(?:" + IDS + ")):" + FLOATS + SEPARATORS + ")*"
ANY_COMMAND = "(?P<command>" + COMMANDS + ")" + SEPARATORS + "(?P<parameters>" + ANY_STRING + ")"
ADD_COMMAND = "(?P<type>" + TYPES + ")" + SEPARATORS + \
"(?P<id>" + IDS + ")" + SEPARATORS + \
"(?P<score>" + FLOATS + ")" + SEPARATORS + \
"(?P<content>" + ANY_STRING + ")"
DEL_COMMAND = "(?P<id>" + IDS + ")"
QUERY_COMMAND = "(?P<nbr_results>" + INTS +")" + SEPARATORS + \
"(?P<query>" + ANY_STRING + ")"
WQUERY_COMMAND = "(?P<nbr_results>" + INTS +")" + SEPARATORS + \
"(?P<nbr_boosts>" + INTS +")" + SEPARATORS + \
"(?P<boosts>" + BOOSTS + ")" + \
"(?P<query>" +ANY_STRING + ")"
COMMAND_MATCHER = re.compile(ANY_COMMAND)
ADD_COMMAND_MATCHER = re.compile(ADD_COMMAND)
DEL_COMMAND_MATCHER = re.compile(DEL_COMMAND)
QUERY_COMMAND_MATCHER = re.compile(QUERY_COMMAND)
WQUERY_COMMAND_MATCHER = re.compile(WQUERY_COMMAND)
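# Illustrative parse with a hypothetical input line (not taken from the challenge
# data): for "WQUERY 2 1 topic:9.99 adam", COMMAND_MATCHER yields command="WQUERY"
# and parameters="2 1 topic:9.99 adam"; WQUERY_COMMAND_MATCHER then yields
# nbr_results="2", nbr_boosts="1", boosts="topic:9.99 " (including the trailing
# separator) and query="adam".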
NOK = "{'':[]}"
class Prefixer():
def __init__(self):
self.__data = {}
def __repr__(self):
return 'Prefixer(%s)' % (self.__data,)
def __eq__(self, other):
return self.__data == other.__data
def get_data(self):
return self.__data
def insert(self, word, item_id):
node = self.__data
while word:
prefix, key = self.longest_prefix(word, node.keys())
if not prefix:
break
len_prefix = len(prefix)
if prefix != key:
# split key into prefix:suffix, move data
suffix = key[len_prefix:]
current_node = node[key]
node[prefix] = {suffix:current_node}
del node[key]
word = word[len_prefix:]
node = node[prefix]
if word:
node[word] = eval(NOK)
node[word][''].append(item_id)
else:
try:
node[word].append(item_id)
except:
node[word] = []
node[word].append(item_id)
return True
def remove(self, word, item_id):
node = self.__data
while word:
prefix, key = self.longest_prefix(word, node.keys())
if not prefix:
return False
node = node.get(prefix, None)
if not node:
return False
word = word[len(prefix):]
try:
node[''].remove(item_id)
return True
except:
return False
def _search_dico(self, word):
node = self.__data
while word:
prefix, key = self.longest_prefix(word, node.keys())
if not prefix:
return False
if not key:
return False
if prefix != key:
if prefix == word:
return node[key]
else:
return False
node = node[prefix]
word = word[len(prefix):]
return node
def search(self, word):
dico = self._search_dico(word)
if dico != False:
return self.traverse_dico(dico)
return []
@staticmethod
def traverse_dico(dico):
results = []
for key, value in dico.iteritems():
if key == '':
results += value
else:
results += Prefixer.traverse_dico(value)
return results
@staticmethod
def longest_prefix(word, candidates):
"""
        Return the longest prefix match between word and any of the
        candidates, if any. Only one candidate will match.
"""
if word:
wc = word[0]
for c in candidates:
if c.startswith(wc):
for i in reversed(xrange(1, min(len(word), len(c))+1)):
if c.startswith(word[:i]):
return (word[:i], c)
return ('', None)
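# Minimal usage sketch of the compressed trie above (the ids are hypothetical,
# not part of any challenge input):
#   p = Prefixer()
#   p.insert("quora", "q1")
#   p.insert("question", "q2")
#   p.search("qu")   # -> ids of every item whose token starts with "qu"
# insert() splits an existing key when a new word only shares part of it as a
# prefix, and search() walks down to the node for the query prefix before
# collecting all ids stored below it via traverse_dico().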
class TypeHead(object):
"""
typehead object that manages all items
@type items: dict
@param items: dict of {id : item}
"""
def __init__(self):
self.items = {}
self.prefixer = Prefixer()
def _add(self, item):
item_id = item.id
item_content = item.content
#add item to the dict
self.items[item_id] = item
tokens = re.split(SEPARATORS, item_content.lower())
#add tokens to the prefixer
for token in tokens:
self.prefixer.insert(token, item_id)
def _delete(self, item_id):
item_content = self.items[item_id].content
#delete the item from the dict
del self.items[item_id]
tokens = re.split(SEPARATORS, item_content.lower())
#remove items from the prefixer for each token
for token in tokens:
self.prefixer.remove(token, item_id)
def _set_items_query(self, query):
items_ids = set()
tokens = re.split(SEPARATORS, query.lower())
cpt = True
for token in tokens:
if cpt:
items_ids = set(self.prefixer.search(token))
else:
items_ids = items_ids.intersection(set(self.prefixer.search(token)))
if items_ids == set():
return items_ids
cpt = False
return items_ids
def _query(self, nbr_results, query):
#collect potential items' ids
items_ids = self._set_items_query(query)
#check if items_ids is not empty
if items_ids == set():
return ""
#rank them according to the scoring method
sorted_results = SortedItems(nbr_results)
for item_id in items_ids:
sorted_results.add(self.items[item_id])
return sorted_results
def _wquery(self, nbr_results, nbr_boosts, boosts, query):
nbr_boosts = int(nbr_boosts)
#collect potential items' ids
items_ids = self._set_items_query(query)
#check if items_ids is not empty
if items_ids == set():
return ""
#check the boosts and create boosts_dict
boosts_dict = {}
if nbr_boosts > 0:
boosts = boosts.split()
for boost in boosts:
type, score = boost.split(':')
boosts_dict[type] = float(score)
#rank them according to the scoring method
sorted_results = SortedItems(nbr_results)
for item_id in items_ids:
item = copy.deepcopy(self.items[item_id])
            # check the boost
if nbr_boosts > 0:
if item.id in boosts_dict.keys():
item.score *= boosts_dict[item.id]
if item.type in boosts_dict.keys():
item.score *= boosts_dict[item.type]
sorted_results.add(item)
return sorted_results
def process_command(self, in_command):
"""
validate the current command and map it to the right function
"""
any_command = COMMAND_MATCHER.match(in_command)
#
if any_command:
command = any_command.group("command")
parameters = any_command.group("parameters")
if (command == "ADD"):
add_command = ADD_COMMAND_MATCHER.match(parameters)
self._add(Item(add_command.group("type"), add_command.group("id"),
add_command.group("score"), add_command.group("content")))
elif (command == "DEL"):
del_command = DEL_COMMAND_MATCHER.match(parameters)
self._delete(del_command.group("id"))
elif (command == "QUERY"):
query_command = QUERY_COMMAND_MATCHER.match(parameters)
results = self._query(query_command.group("nbr_results"), query_command.group("query"))
print results
elif (command == "WQUERY"):
wquery_command = WQUERY_COMMAND_MATCHER.match(parameters)
results = self._wquery(wquery_command.group("nbr_results"), wquery_command.group("nbr_boosts"),
wquery_command.group("boosts"), wquery_command.group("query"))
print results
class Item(object):
"""
either a topic, a user, a board or a question
@type type: str
@param type: The item's type.
@type id: str
@param id: The item's id.
@type score: float
@param score: The item's score.
@type content: str
    @param content: The item's content.
@type time: time
@param time: The item's time of creation.
"""
def __init__(self, type, id, score, content):
self.type = type
self.id = id
self.score = float(score)
self.content = content
self.time = datetime.datetime.now()
def __repr__(self):
return self.id
def better_than(self, item):
"""
        Compare the current item to the input item using two rules:
        . the highest score goes first.
        . on equal scores, the tie is broken by creation time.
        Returns True if the current item is better than the input item, False otherwise.
"""
if (self.score > item.score):
return True
if (self.score < item.score):
return False
return True if (self.time > item.time) else False
class SortedItems(object):
"""
Keeps a list of sorted elements depending on the scoring method.
@type items: list
    @param items: the list of sorted items
    @type max_size: int
    @param max_size: the max size of the list (-1 means unlimited number of items)
"""
def __init__(self, max_size=-1):
self.items = []
self.max_size = int(max_size)
def __repr__(self):
return " ".join([item.id for item in self.items])
def set_max_size(self, max_size):
self.max_size = int(max_size)
def add(self, item):
"""
        Add a new item to the list of items.
        If the list is full, the item is only inserted when it beats at least one item
        already in the list, and the item with the worst score is popped.
"""
items_l = len(self.items)
pos = items_l
for i in xrange(items_l):
if (item.better_than(self.items[i])):
pos = i
break
if (self.max_size < 0 or pos < self.max_size):
temp = self.items[:pos]
temp.append(item)
temp += self.items[pos:]
self.items = temp
#now in the case of exceeding max_size
if (self.max_size > 0 and (items_l+1)>self.max_size):
self.items.pop()
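# Small worked example (hypothetical scores, not from the challenge input): with
# max_size=2, adding items scored 5.0, 3.0 and then 7.0 leaves the list ordered
# [7.0, 5.0] -- the better item is inserted in front and the worst one is popped
# once the list would exceed max_size.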
t = TypeHead()
N = int(raw_input())
while(N):
t.process_command(raw_input())
N -= 1
| |
import copy
import sys
from rodney import sprites
import rodney.ai as ai
def make_framebuffer(width, height):
"""
A framebuffer is just a big 2D list (coordinates are [y][x])
:param width: Width of the framebuffer
:param height: Height of the framebuffer
    :return: Empty framebuffer
"""
# Since the framebuffer might be changed via script we need to account for that
if width < 1 or height < 1:
raise ValueError('Framebuffer too small: %i, %i' % (width, height))
fb = []
for y in range(height):
fb.append([' '] * width)
return fb
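# Usage sketch (the dimensions are arbitrary examples): make_framebuffer(80, 24)
# returns 24 rows of 80 space characters, addressed as fb[y][x], which the
# drawing helpers below fill in place.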
def clear_framebuffer(fb):
"""
Clear framebuffer (fill with spaces)
:param fb: Framebuffer to operate on
"""
for y in range(len(fb)):
for x in range(len(fb[0])):
fb[y][x] = ' '
def print_framebuffer(fb):
"""
Print framebuffer
:param fb: Framebuffer
"""
result = ""
for line in fb:
result += "".join(line)
print(result, end="")
def draw_background(offset: int, bg, fb):
"""
Draw background into framebuffer, overwrites everything
:param offset: Vertical offset, wraps automatically
:param bg: Background data
    :param fb: Framebuffer to draw into
"""
# Wrap around the bg
offset %= len(bg)
# Has to be drawn first, overwrites everything
fb_height = len(fb)
fb_width = len(fb[0])
bg_width = len(bg)
"""
# This is only relevant if we ever make a cross fade between different backgrounds
for y in range(fb_height):
for x in range(min(fb_width, fb_width + (bg_width - (offset + fb_width)))):
fb[y][x] = bg[x + offset][0][y]
"""
for y in range(fb_height):
for x in range(fb_width):
fb[y][x] = bg[(x + offset) % bg_width][0][y]
class Sprite:
"""
Everything that moves is a sprite
Notes:
Interesting things about shallow/deep copy
http://stackoverflow.com/questions/3975376/understanding-dict-copy-shallow-or-deep
Difference list/tuple
http://stackoverflow.com/questions/626759/whats-the-difference-between-list-and-tuples
"""
def __init__(self, data, info, x_start, y_start, unique_sprite_name=None):
"""
Constructor
Sets up the following fields in the local info dict:
Initialize health to 'health_max'
Set position (x and y)
Set ai_last_move to 0 (commonly used by the ai)
:param data: Sprite 'graphics'
:param info: Info array, see sprites.py for details
:param x_start: Start position
:param y_start: Start position
:param unique_sprite_name: A unique name that might be filtered for by running scripts
"""
self.data = copy.deepcopy(data)
self.info = copy.deepcopy(info)
self.unique_sprite_name = unique_sprite_name
# Setup health
if not self.info['is_projectile']:
self.info['health'] = self.info['health_max']
# Setup position
self.info['x'] = x_start
self.info['y'] = y_start
# Setup ai variables
self.info['ai_last_move'] = 0 # Marks the number of frames since last action, ai has to increase it if used
def draw_sprite(self, global_frame_number, fb):
"""
Draw this sprite into the given framebuffer
:param global_frame_number: Wrapping framecounter used to determine current frame
:param fb: Framebuffer to draw to
"""
# Add invincibility flickering (don't draw if we are on an uneven frame)
if 'invincibility' in self.info and self.info['invincibility'] != 0 and global_frame_number%2 == 1:
return
# Process sprite
frame_data = self.data[(global_frame_number // self.info['anim_speed']) % self.info['frames']]
fb_height = len(fb)
fb_width = len(fb[0])
# Copy data into fb
for sprite_y in range(min(fb_height - self.info['y'], self.info['height'])):
# TODO-SPEEDUP can a partial list be copied instead of each element separately
for sprite_x in range(min(fb_width - self.info['x'], self.info['width'])):
fb[self.info['y'] + sprite_y][self.info['x'] + sprite_x] = frame_data[sprite_y][sprite_x]
class SpriteManager:
"""
Very simple sprite manager, everything is a sprite: player, projectile, boss, doesn't matter
Delegates all the work to the actual ai methods
"""
def __init__(self):
""" Constructor """
self.sprites = []
def add_sprite(self, sprite_name: str, x: int, y: int, unique_sprite_name=None):
# TODO-FEATURE does not check for collision of unique_sprite_name names
sprite_data = getattr(sprites, 'spr_' + sprite_name)
sprite_info = getattr(sprites, 'info_' + sprite_name)
self.sprites.append(Sprite(sprite_data, sprite_info, x, y, unique_sprite_name))
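    # Usage sketch: add_sprite('rodney', 5, 10, unique_sprite_name='rodney') would
    # look up sprites.spr_rodney and sprites.info_rodney by this naming convention;
    # the coordinates and the presence of that particular definition in sprites.py
    # are assumptions made purely for illustration.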
def get_unique_sprite(self, name: str):
"""
        Probably unused; raises a KeyError when no sprite matches. TODO: make it more useful
:param name: Name of character to search for
"""
# We have to iterate by hand since the name is an attribute
for sprite in self.sprites:
if sprite.unique_sprite_name == name:
return sprite
raise KeyError("Unique sprite %s not found" % name)
def delete_sprite(self, sprite: Sprite):
"""
Delete a sprite, no special cleanup is done
Any bullets or other interactions with the environment persist
:param sprite: Sprite object
"""
# Guard against already removed objects, check collision might call delete with an invalid object
# TODO fix this behaviour in check_collision
if sprite in self.sprites:
self.sprites.remove(sprite)
def advance_ai(self, fb, global_frame_number: int):
"""
AI can add more fields to the info part of a sprite, all should be prefixed with 'ai_'
        :param fb: Framebuffer, passed through to the individual AI callbacks
        :param global_frame_number: A rotating counter, mostly used for sprite animation
"""
for sprite in self.sprites:
# AI of each sprite
current_ai = getattr(ai, sprite.info['ai'])
current_ai(self, sprite, fb, global_frame_number)
def check_collision(self, fb):
"""
Check collision between all sprites in this sprite manager
:param fb: For framebuffer size
"""
fb_height = len(fb)
fb_width = len(fb[0])
# Check if sprite is out of bounds, if so, remove it
for sprite in self.sprites:
if sprite.info['x'] < 0 or sprite.info['y'] < 0 or \
sprite.info['x'] > fb_width - 1 or sprite.info['y'] > fb_height - 1:
self.delete_sprite(sprite)
# TODO improve collision system for multiple classes
"""
The current collision system works by having 4 types of objects:
case 1:
-is_projectile: True, no player_projectile tag: Enemy projectile, collides with player
-is_projectile: False, not the player: Collides with player
case 2:
-is_projectile: True, player_projectile tag: Rodney bullet, collides with enemies
"""
# This is quite hacky TODO do this nicely
# Currently collision is between all objects (so it can be extended easily), but won't scale well
for sprite in self.sprites:
# Enemy projectiles + non-unique enemies (not bosses) collide with player
if sprite.unique_sprite_name is None and 'player_projectile' not in sprite.info:
for other_sprite in self.sprites:
# Check if it's another sprite
if other_sprite != sprite and other_sprite.unique_sprite_name == 'rodney':
# Check collision box
if sprite.info['x'] < other_sprite.info['x'] + other_sprite.info['width'] and \
sprite.info['x'] + sprite.info['width'] > other_sprite.info['x'] and \
sprite.info['y'] < other_sprite.info['y'] + other_sprite.info['height'] and \
sprite.info['height'] + sprite.info['y'] > other_sprite.info['y']:
rodney = other_sprite
if 'invincibility' in rodney.info and rodney.info['invincibility'] <= 0:
# TODO add damage method for sprite
rodney.info['health'] = max(rodney.info['health']-sprite.info['damage'], 0)
rodney.info['invincibility'] = 15
# Since bullets don't have health they are removed by hand
self.delete_sprite(sprite)
# Player projectiles collide with enemies
elif 'player_projectile' in sprite.info:
for other_sprite in self.sprites:
# Player projectile collides with enemies and unique sprites (except rodney)
if not other_sprite.info['is_projectile'] and other_sprite.unique_sprite_name != 'rodney':
# Check if it's another sprite
if other_sprite != sprite:
# Check collision box
if sprite.info['x'] < other_sprite.info['x'] + other_sprite.info['width'] and \
sprite.info['x'] + sprite.info['width'] > other_sprite.info['x'] and \
sprite.info['y'] < other_sprite.info['y'] + other_sprite.info['height'] and \
sprite.info['height'] + sprite.info['y'] > other_sprite.info['y']:
# Deal damage to enemy, remove projectile sprite, enemies get no invincibility
other_sprite.info['health'] -= sprite.info['damage']
self.delete_sprite(sprite)
# Check if sprite is 'dead'
if 'health' in sprite.info and sprite.info['health'] <= 0:
if sprite.unique_sprite_name == 'razmi':
# Find rodney and set his flag for winning the game
# TODO this is hacky, since we don't have scripts there is no other way
for potential_rodney in self.sprites:
if potential_rodney.unique_sprite_name == 'rodney':
potential_rodney.info['ai_game_won'] = True
self.delete_sprite(sprite)
# Check if sprite is out of bounds, if so, remove it
# TODO this method is called twice because we want to be extra sure that we don't render anything that is offscreen
# TODO also the checking is rudimentary, only sprites that are fully offscreen should disappear -.-
for sprite in self.sprites:
if sprite.info['x'] < 0 or sprite.info['y'] < 0 or \
sprite.info['x'] > fb_width - 1 or sprite.info['y'] > fb_height - 1:
self.delete_sprite(sprite)
def draw_all(self, global_frame_number: int, fb):
"""
Draw all sprites in this sprite manager
:param global_frame_number: Needed for drawing the sprites
:param fb: The framebuffer to write to
"""
for sprite in self.sprites:
sprite.draw_sprite(global_frame_number, fb)
| |
"""
Bbox Target Operator
Selects foreground and background proposals and encodes them as training targets.
"""
import mxnet as mx
import numpy as np
import numpy.random as npr
from ast import literal_eval
from operator_py.detectron_bbox_utils import bbox_overlaps, bbox_transform_inv
def _sample_proposal(proposals, gt_bboxes, image_rois, fg_fraction, fg_thresh, bg_thresh_hi,
bg_thresh_lo, inv_stds, num_reg_class, xywh):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
num_gt = gt_bboxes.shape[0]
num_proposal = proposals.shape[0]
ignore_label = -2
valid_gt_index = np.where(gt_bboxes[:, 4] != ignore_label)[0]
gt_bboxes = gt_bboxes[valid_gt_index]
if gt_bboxes.shape[0] != 0:
proposal_to_gt_overlaps = bbox_overlaps(
proposals.astype(np.float32, copy=False),
gt_bboxes.astype(np.float32, copy=False)
)
else:
proposal_to_gt_overlaps = np.zeros((num_proposal, 1))
proposal_assigned_gt_index = proposal_to_gt_overlaps.argmax(axis=1)
proposal_assigned_class = gt_bboxes[:, 4][proposal_assigned_gt_index]
proposal_max_overlap_w_gt = proposal_to_gt_overlaps.max(axis=1)
rois_per_image = image_rois
fg_rois_per_image = int(np.round(fg_fraction * rois_per_image))
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(proposal_max_overlap_w_gt >= fg_thresh)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False
)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where(
(proposal_max_overlap_w_gt < bg_thresh_hi) &
(proposal_max_overlap_w_gt >= bg_thresh_lo)
)[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False
)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Label is the class each RoI has max overlap with
sampled_labels = proposal_assigned_class[keep_inds]
sampled_labels[fg_rois_per_this_image:] = 0 # Label bg RoIs with class 0
sampled_proposals = proposals[keep_inds]
sampled_gt_bboxes = gt_bboxes[proposal_assigned_gt_index[keep_inds]]
bbox_targets = bbox_transform_inv(sampled_proposals, sampled_gt_bboxes, inv_stds)
bbox_class = sampled_labels[:, None]
if num_reg_class == 2:
bbox_class = np.array(bbox_class > 0, dtype=bbox_targets.dtype)
bbox_targets_with_class = np.concatenate([bbox_class, bbox_targets], axis=1)
bbox_targets, bbox_weights = _expand_bbox_targets(bbox_targets_with_class, num_reg_class)
return sampled_proposals, sampled_labels, bbox_targets, bbox_weights
def _expand_bbox_targets(bbox_target_data, num_bbox_reg_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_bbox_reg_classes))
bbox_weights = np.zeros(bbox_targets.shape)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_weights[ind, start:end] = (1.0, 1.0, 1.0, 1.0)
return bbox_targets, bbox_weights
class BboxTargetOperator(mx.operator.CustomOp):
def __init__(self, num_classes, add_gt_to_proposal, image_rois, fg_fraction,
fg_thresh, bg_thresh_hi, bg_thresh_lo, bbox_target_std, xywh):
super().__init__()
self._num_classes = num_classes
self._add_gt_to_proposal = add_gt_to_proposal
self._image_rois = image_rois
self._fg_fraction = fg_fraction
self._fg_thresh = fg_thresh
self._bg_thresh_hi = bg_thresh_hi
self._bg_thresh_lo = bg_thresh_lo
self._bbox_target_std = bbox_target_std
self._xywh = xywh
def forward(self, is_train, req, in_data, out_data, aux):
proposals = in_data[0].asnumpy() # N x K x 4
gt_bboxes = in_data[1].asnumpy() # N x M x 5
batch_image = proposals.shape[0]
image_rois = self._image_rois
fg_fraction = self._fg_fraction
fg_thresh = self._fg_thresh
bg_thresh_hi = self._bg_thresh_hi
bg_thresh_lo = self._bg_thresh_lo
inv_stds = list(1.0 / std for std in self._bbox_target_std)
num_reg_class = self._num_classes
xywh = self._xywh
keep_proposals = []
keep_gt_bboxes = []
# clean up gt_bbox
for im_gt_bbox in gt_bboxes:
valid = np.where(im_gt_bbox[:, 4] != -1)[0] # class == -1 indicates padding
keep_gt_bboxes.append(im_gt_bbox[valid])
# clean up proposal
for im_proposal in proposals:
valid = np.where(im_proposal[:, -1] != 0)[0] # y2 == 0 indicates padding
keep_proposals.append(im_proposal[valid])
if self._add_gt_to_proposal:
for i in range(batch_image):
im_proposal, im_gt_bbox = keep_proposals[i], keep_gt_bboxes[i]
keep_proposals[i] = np.append(im_proposal, im_gt_bbox[:, :4], axis=0)
sampled_proposal, bbox_class, bbox_target, bbox_target_weight = [], [], [], []
for i in range(batch_image):
output = _sample_proposal(
keep_proposals[i],
keep_gt_bboxes[i],
image_rois,
fg_fraction,
fg_thresh,
bg_thresh_hi,
bg_thresh_lo,
inv_stds,
num_reg_class,
xywh
)
sampled_proposal_i, bbox_class_i, bbox_target_i, bbox_target_weight_i = output
sampled_proposal.append(sampled_proposal_i)
bbox_class.append(bbox_class_i)
bbox_target.append(bbox_target_i)
bbox_target_weight.append(bbox_target_weight_i)
sampled_proposal = np.array(sampled_proposal, dtype=np.float32)
bbox_class = np.array(bbox_class, dtype=np.float32)
bbox_target = np.array(bbox_target, dtype=np.float32)
bbox_target_weight = np.array(bbox_target_weight, dtype=np.float32)
for i, val in enumerate([sampled_proposal, bbox_class, bbox_target, bbox_target_weight]):
self.assign(out_data[i], req[i], val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
@mx.operator.register('bbox_target')
class BboxTargetProp(mx.operator.CustomOpProp):
def __init__(self, num_class, add_gt_to_proposal, image_rois, fg_fraction, fg_thresh,
bg_thresh_hi, bg_thresh_lo, bbox_target_std, xywh='True'):
super().__init__(need_top_grad=False)
self._num_class = int(num_class)
self._add_gt_to_proposal = literal_eval(add_gt_to_proposal)
self._image_rois = int(image_rois)
self._fg_fraction = float(fg_fraction)
self._fg_thresh = float(fg_thresh)
self._bg_thresh_hi = float(bg_thresh_hi)
self._bg_thresh_lo = float(bg_thresh_lo)
self._bbox_target_std = literal_eval(bbox_target_std)
self._xywh = literal_eval(xywh)
if self._xywh:
print('bbox_target encode type: xywh')
else:
print('bbox_target encode type: xyxy')
def list_arguments(self):
return ['proposal', 'gt_bbox']
def list_outputs(self):
return ['sampled_proposal', 'bbox_cls', 'bbox_target', 'bbox_target_weight']
def infer_shape(self, in_shape):
rpn_rois_shape = in_shape[0]
gt_boxes_shape = in_shape[1]
batch_image = rpn_rois_shape[0]
sampled_proposal_shape = (batch_image, self._image_rois, 4)
bbox_cls_shape = (batch_image, self._image_rois, )
bbox_target_shape = (batch_image, self._image_rois, self._num_class * 4)
bbox_weight_shape = (batch_image, self._image_rois, self._num_class * 4)
return [rpn_rois_shape, gt_boxes_shape], \
[sampled_proposal_shape, bbox_cls_shape, bbox_target_shape, bbox_weight_shape]
def create_operator(self, ctx, shapes, dtypes):
return BboxTargetOperator(
self._num_class,
self._add_gt_to_proposal,
self._image_rois,
self._fg_fraction,
self._fg_thresh,
self._bg_thresh_hi,
self._bg_thresh_lo,
self._bbox_target_std,
self._xywh
)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
| |
from neurons import spiking
__author__ = 'johannes'
import pytest
import numpy as np
class TestVarious:
def test_neuron_no_spike(self):
# Neuron should not spike
timesteps = 20
# Two neurons
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Neuron 1 is connected to Neuron 2 with weight 1
weights = np.array([[0, 1], [0, 0]])
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps), dtype=bool)
current = spiking_model.check_spikes(spiketrain, weights, 19)
        # The resulting current on both neurons should be zero
assert np.array_equal(current, np.array([0, 0]))
def test_negative_weight(self):
# If weights are negative enough, neuron should not spike
# The same code as test_neuron_spike but with negative weight!
timesteps = 20
# Two neurons
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Neuron 1 is connected to Neuron 2 with weight -1
weights = np.array([[0, -1], [0, 0]])
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps), dtype=bool)
# Neuron 1 Spikes all the time :)
spiketrain[0,:] = 1
current = spiking_model.check_spikes(spiketrain, weights, 19)
        # The resulting current on Neuron 1 should be 0
# on Neuron 2 it should be negative
assert current[0] == 0
assert current[1] < 0
def test_neuron_spike(self):
# Neuron should spike
timesteps = 20
# Two neurons
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Neuron 1 is connected to Neuron 2 with weight 1
weights = np.array([[0, 1], [0, 0]], dtype=bool)
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps))
# Neuron 1 Spikes all the time :)
spiketrain[0,:] = 1
current = spiking_model.check_spikes(spiketrain, weights, 19)
        # The resulting current on Neuron 1 should be 0
# on Neuron 2 it should be positive
assert current[0] == 0
assert current[1] > 0
def test_potential_cross_from_below(self):
''' Tests if there is only a spike if the potential hits the threshold from below '''
threshold = 1.0
timesteps = 20
spiking_model = spiking.SRM(neurons=2, threshold=threshold, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
weights = np.array([[0, 1], [0, 0]])
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps), dtype=bool)
# Neuron 1 Spikes all the time :)
spiketrain[0,:] = 1
current = []
for t in range(timesteps):
current.append(spiking_model.check_spikes(spiketrain, weights, t))
        # Potential of the 2nd neuron is over the threshold at the last timestep, but it does not spike
assert current[19][1] >= threshold
assert not spiketrain[1, 19]
# More elaborated assertions:
for t in range(timesteps):
if spiketrain[1, t]:
assert current[t-1][1] < threshold
assert current[t][1] >= threshold
else:
assert current[t][1] < threshold or (t >= 1 and current[t-1][1] >= threshold)
def test_rerun_spike_check(self):
''' If we rerun spikecheck with the same time, we should get the same results '''
rerun_time = 15
timesteps = 20
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
weights = np.array([[0, 1], [0, 0]])
# Neuron 1 Spikes at 0 and 5 ms
spiketrain = np.zeros((2, timesteps), dtype=bool)
spiketrain[0,(0, 5)] = True
# Run for the first time
current = []
for t in range(timesteps):
current.append(spiking_model.check_spikes(spiketrain, weights, t))
current_rerun = spiking_model.check_spikes(spiketrain, weights, rerun_time - 1)
assert np.array_equal(current[rerun_time - 1] - current_rerun, np.array([0, 0]))
def test_different_time_constants(self):
# Each neuron has different time constants
pass
class TestShouldFail:
def test_wrong_spiketrain_size(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Empty spiketrain is too short
spiketrain1 = np.zeros((2, 20))
# Neuron 1 is connected to Neuron 2 with weight 1
weights = np.array([[0, 1], [0, 0]], dtype=bool)
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes(spiketrain1, weights, 20)
assert "Spiketrain too short (0ms -- 19ms) for simulating time 20" in str(e.value)
def test_simulate_wrong_types(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 21))
weights = np.array([[0, 1], [0, 0]], dtype=bool)
# Spiketrain is not a numpy array
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes([0,0,0], weights, 20)
# Weights is not a matrix
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes(spiketrain1, [[0,1],[0,0]], 20)
# Time is not a int
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes(spiketrain1, weights, [20, 13])
assert "Variable t should be int or convertible to int" in str(e.value)
def test_wrong_weight_size(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 21))
# Wrong weights
weights = np.array([[0, 1], [0, 0], [0, 0]], dtype=bool)
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes(spiketrain1, weights, 20)
assert "Weigths should be a quadratic matrix" in str(e.value)
def test_wrong_additional_term_size(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 21))
# Wrong weights
weights = np.array([[0, 1], [0, 0]], dtype=bool)
additional_term = np.array([1, 2, 3])
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes(spiketrain1, weights, 20, additional_term=additional_term)
assert "Additional_term should be a vector with one element for each neuron" in str(e.value)
def test_wrong_time_too_small(self):
# Simulate a time that is too small
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 20))
weights = np.array([[0, 1], [0, 0]], dtype=bool)
with pytest.raises(ValueError) as e:
current = spiking_model.check_spikes(spiketrain1, weights, -1)
assert "Time to be simulated is too small" in str(e.value)
def test_wrong_number_of_constants(self):
# 3 Neurons, 3 different t_s, but only 2 different t_m
with pytest.raises(ValueError) as e:
spiking_model = spiking.SRM(neurons=3, threshold=1.0, t_current=[0.3, 0.2, 0.3],
t_membrane=[0.2, 0.5], eta_reset=[0.5, 0.5, 0.6], verbose=False)
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the Oppia exploration learner view."""
from __future__ import annotations
import json
import logging
import random
from core import feconf
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.controllers import domain_objects_validator
from core.controllers import editor
from core.domain import collection_services
from core.domain import config_domain
from core.domain import event_services
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import interaction_registry
from core.domain import learner_progress_services
from core.domain import moderator_services
from core.domain import question_services
from core.domain import rating_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import story_fetchers
from core.domain import summary_services
from core.domain import user_services
MAX_SYSTEM_RECOMMENDATIONS = 4
def _does_exploration_exist(exploration_id, version, collection_id):
"""Returns if an exploration exists.
Args:
exploration_id: str. The ID of the exploration.
version: int or None. The version of the exploration.
collection_id: str. ID of the collection.
Returns:
        bool. True if the exploration exists, False otherwise.
"""
exploration = exp_fetchers.get_exploration_by_id(
exploration_id, strict=False, version=version)
if exploration is None:
return False
if collection_id:
collection = collection_services.get_collection_by_id(
collection_id, strict=False)
if collection is None:
return False
return True
class ExplorationEmbedPage(base.BaseHandler):
"""Page describing a single embedded exploration."""
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Handles GET requests.
Args:
exploration_id: str. The ID of the exploration.
"""
version_str = self.request.get('v')
version = int(version_str) if version_str else None
# Note: this is an optional argument and will be None when the
# exploration is being played outside the context of a collection.
collection_id = self.request.get('collection_id')
# This check is needed in order to show the correct page when a 404
# error is raised. The self.request.get('iframed') part of the check is
# needed for backwards compatibility with older versions of the
# embedding script.
if (feconf.EXPLORATION_URL_EMBED_PREFIX in self.request.uri or
self.request.get('iframed')):
self.iframed = True
if not _does_exploration_exist(exploration_id, version, collection_id):
raise self.PageNotFoundException
self.iframed = True
self.render_template(
'exploration-player-page.mainpage.html', iframe_restriction=None)
class ExplorationPage(base.BaseHandler):
"""Page describing a single exploration."""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {
'v': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
# Version must be greater than zero.
'min_value': 1
}]
},
'default_value': None
},
'parent': {
'schema': {
'type': 'basestring'
},
'default_value': None
},
'iframed': {
'schema': {
'type': 'bool'
},
'default_value': None
},
'collection_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
},
'default_value': None
}
}
}
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Handles GET requests.
Args:
exploration_id: str. The ID of the exploration.
"""
version = self.normalized_request.get('v')
if self.normalized_request.get('iframed'):
redirect_url = '/embed/exploration/%s' % exploration_id
if version:
redirect_url += '?v=%s' % version
self.redirect(redirect_url)
return
# Note: this is an optional argument and will be None when the
# exploration is being played outside the context of a collection or if
# the 'parent' parameter is present.
if self.normalized_request.get('parent'):
collection_id = None
else:
collection_id = self.normalized_request.get('collection_id')
if not _does_exploration_exist(exploration_id, version, collection_id):
raise self.PageNotFoundException
self.render_template('exploration-player-page.mainpage.html')
class ExplorationHandler(base.BaseHandler):
"""Provides the initial data for a single exploration."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {
'v': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
# Version must be greater than zero.
'min_value': 1
}]
},
'default_value': None
}
}
}
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Populates the data on the individual exploration page.
Args:
exploration_id: str. The ID of the exploration.
"""
version = self.normalized_request.get('v')
exploration = exp_fetchers.get_exploration_by_id(
exploration_id, strict=False, version=version)
if exploration is None:
raise self.PageNotFoundException()
exploration_rights = rights_manager.get_exploration_rights(
exploration_id, strict=False)
user_settings = user_services.get_user_settings(self.user_id)
preferred_audio_language_code = None
preferred_language_codes = None
if user_settings is not None:
preferred_audio_language_code = (
user_settings.preferred_audio_language_code)
preferred_language_codes = (
user_settings.preferred_language_codes)
self.values.update({
'can_edit': (
rights_manager.check_can_edit_activity(
self.user, exploration_rights)),
'exploration': exploration.to_player_dict(),
'exploration_id': exploration_id,
'is_logged_in': bool(self.user_id),
'session_id': utils.generate_new_session_id(),
'version': exploration.version,
'preferred_audio_language_code': preferred_audio_language_code,
'preferred_language_codes': preferred_language_codes,
'auto_tts_enabled': exploration.auto_tts_enabled,
'correctness_feedback_enabled': (
exploration.correctness_feedback_enabled),
'record_playthrough_probability': (
config_domain.RECORD_PLAYTHROUGH_PROBABILITY.value),
})
self.render_json(self.values)
class PretestHandler(base.BaseHandler):
"""Provides subsequent pretest questions after initial batch."""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {
'story_url_fragment': constants.SCHEMA_FOR_STORY_URL_FRAGMENTS,
}
}
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Handles GET request."""
story_url_fragment = self.request.get('story_url_fragment')
story = story_fetchers.get_story_by_url_fragment(story_url_fragment)
if story is None:
raise self.InvalidInputException
if not story.has_exploration(exploration_id):
raise self.InvalidInputException
pretest_questions = (
question_services.get_questions_by_skill_ids(
feconf.NUM_PRETEST_QUESTIONS,
story.get_prerequisite_skill_ids_for_exp_id(exploration_id),
True)
)
question_dicts = [question.to_dict() for question in pretest_questions]
self.values.update({
'pretest_question_dicts': question_dicts,
})
self.render_json(self.values)
class StorePlaythroughHandler(base.BaseHandler):
"""Commits a playthrough recorded on the frontend to storage."""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'issue_schema_version': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 1
}]
},
},
'playthrough_data': {
'schema': {
'type': 'object_dict',
'object_class': stats_domain.Playthrough
}
},
}
}
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests. Appends to existing list of playthroughs or
deletes it if already full.
Args:
exploration_id: str. The ID of the exploration.
"""
issue_schema_version = self.normalized_payload.get(
'issue_schema_version')
playthrough = self.normalized_payload.get('playthrough_data')
exp_issues = stats_services.get_exp_issues(
exploration_id, playthrough.exp_version)
if stats_services.assign_playthrough_to_corresponding_issue(
playthrough, exp_issues, issue_schema_version):
stats_services.save_exp_issues_model(exp_issues)
self.render_json({})
class StatsEventsHandler(base.BaseHandler):
"""Handles a batch of events coming in from the frontend."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'aggregated_stats': {
'schema': {
'type': 'object_dict',
'validation_method': (
domain_objects_validator.validate_aggregated_stats),
}
},
'exp_version': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
# Version must be greater than zero.
'min_value': 1
}]
}
}
}
}
@acl_decorators.can_play_exploration
def post(self, exploration_id):
aggregated_stats = self.normalized_payload.get('aggregated_stats')
exp_version = self.normalized_payload.get('exp_version')
event_services.StatsEventsHandler.record(
exploration_id, exp_version, aggregated_stats)
self.render_json({})
class AnswerSubmittedEventHandler(base.BaseHandler):
"""Tracks a learner submitting an answer."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
old_state_name = self.payload.get('old_state_name')
# The reader's answer.
answer = self.payload.get('answer')
# Parameters associated with the learner.
params = self.payload.get('params', {})
# The version of the exploration.
version = self.payload.get('version')
if version is None:
raise self.InvalidInputException(
'NONE EXP VERSION: Answer Submit')
session_id = self.payload.get('session_id')
client_time_spent_in_secs = self.payload.get(
'client_time_spent_in_secs')
# The answer group and rule spec indexes, which will be used to get
# the rule spec string.
answer_group_index = self.payload.get('answer_group_index')
rule_spec_index = self.payload.get('rule_spec_index')
classification_categorization = self.payload.get(
'classification_categorization')
exploration = exp_fetchers.get_exploration_by_id(
exploration_id, version=version)
old_interaction = exploration.states[old_state_name].interaction
old_interaction_instance = (
interaction_registry.Registry.get_interaction_by_id(
old_interaction.id))
normalized_answer = old_interaction_instance.normalize_answer(answer)
event_services.AnswerSubmissionEventHandler.record(
exploration_id, version, old_state_name,
exploration.states[old_state_name].interaction.id,
answer_group_index, rule_spec_index, classification_categorization,
session_id, client_time_spent_in_secs, params, normalized_answer)
self.render_json({})
class StateHitEventHandler(base.BaseHandler):
"""Tracks a learner hitting a new state."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
new_state_name = self.payload.get('new_state_name')
exploration_version = self.payload.get('exploration_version')
if exploration_version is None:
raise self.InvalidInputException(
'NONE EXP VERSION: State hit')
session_id = self.payload.get('session_id')
# TODO(sll): Why do we not record the value of this anywhere?
client_time_spent_in_secs = self.payload.get( # pylint: disable=unused-variable
'client_time_spent_in_secs')
old_params = self.payload.get('old_params')
# Record the state hit, if it is not the END state.
if new_state_name is not None:
event_services.StateHitEventHandler.record(
exploration_id, exploration_version, new_state_name,
session_id, old_params, feconf.PLAY_TYPE_NORMAL)
else:
logging.exception('Unexpected StateHit event for the END state.')
self.render_json({})
class StateCompleteEventHandler(base.BaseHandler):
"""Tracks a learner complete a state. Here, 'completing' means answering
the state and progressing to a new state.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests."""
if self.payload.get('exp_version') is None:
raise self.InvalidInputException(
'NONE EXP VERSION: State Complete')
event_services.StateCompleteEventHandler.record(
exploration_id, self.payload.get('exp_version'),
self.payload.get('state_name'), self.payload.get('session_id'),
self.payload.get('time_spent_in_state_secs'))
self.render_json({})
class LeaveForRefresherExpEventHandler(base.BaseHandler):
"""Tracks a learner leaving an exploration for a refresher exploration."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests."""
event_services.LeaveForRefresherExpEventHandler.record(
exploration_id, self.payload.get('refresher_exp_id'),
self.payload.get('exp_version'), self.payload.get('state_name'),
self.payload.get('session_id'),
self.payload.get('time_spent_in_state_secs'))
self.render_json({})
class ReaderFeedbackHandler(base.BaseHandler):
"""Submits feedback from the reader."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
subject = self.payload.get('subject', 'Feedback from a learner')
feedback = self.payload.get('feedback')
include_author = self.payload.get('include_author')
feedback_services.create_thread(
feconf.ENTITY_TYPE_EXPLORATION,
exploration_id,
self.user_id if include_author else None,
subject,
feedback)
self.render_json(self.values)
class ExplorationStartEventHandler(base.BaseHandler):
"""Tracks a learner starting an exploration."""
REQUIRE_PAYLOAD_CSRF_CHECK = False
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'params': {
'schema': {
'type': 'dict',
'properties': []
}
},
'session_id': {
'schema': {
'type': 'basestring'
}
},
'state_name': {
'schema': {
'type': 'basestring'
}
},
'version': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 1
}]
}
},
}
}
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
event_services.StartExplorationEventHandler.record(
exploration_id,
self.normalized_payload.get('version'),
self.normalized_payload.get('state_name'),
self.normalized_payload.get('session_id'),
self.normalized_payload.get('params'),
feconf.PLAY_TYPE_NORMAL)
self.render_json({})
class ExplorationActualStartEventHandler(base.BaseHandler):
"""Tracks a learner actually starting an exploration. These are the learners
who traverse past the initial state.
"""
REQUIRE_PAYLOAD_CSRF_CHECK = False
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'exploration_version': {
'schema': {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 1
}]
}
},
'state_name': {
'schema': {
'type': 'basestring'
}
},
'session_id': {
'schema': {
'type': 'basestring'
}
},
}
}
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests."""
event_services.ExplorationActualStartEventHandler.record(
exploration_id,
self.normalized_payload.get('exploration_version'),
self.normalized_payload.get('state_name'),
self.normalized_payload.get('session_id'))
self.render_json({})
class SolutionHitEventHandler(base.BaseHandler):
"""Tracks a learner clicking on the 'View Solution' button."""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': editor.SCHEMA_FOR_EXPLORATION_ID
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'exploration_version': {
'schema': editor.SCHEMA_FOR_VERSION
},
'state_name': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'has_length_at_most',
'max_value': constants.MAX_STATE_NAME_LENGTH
}]
}
},
'session_id': {
'schema': {
'type': 'basestring'
}
},
'time_spent_in_state_secs': {
'schema': {
'type': 'float',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}]
}
}
}
}
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests."""
event_services.SolutionHitEventHandler.record(
exploration_id,
self.normalized_payload.get('exploration_version'),
self.normalized_payload.get('state_name'),
self.normalized_payload.get('session_id'),
self.normalized_payload.get('time_spent_in_state_secs'))
self.render_json({})
class ExplorationCompleteEventHandler(base.BaseHandler):
"""Tracks a learner completing an exploration.
The state name recorded should be a state with a terminal interaction.
"""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': editor.SCHEMA_FOR_EXPLORATION_ID
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'collection_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
},
'default_value': None
},
'version': {
'schema': editor.SCHEMA_FOR_VERSION
},
'state_name': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'has_length_at_most',
'max_value': constants.MAX_STATE_NAME_LENGTH
}]
}
},
'session_id': {
'schema': {
'type': 'basestring'
}
},
'client_time_spent_in_secs': {
'schema': {
'type': 'float',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}]
}
},
'params': {
'schema': {
'type': 'object_dict',
'validation_method': (
domain_objects_validator.validate_params_dict),
}
}
}
}
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
# This will be None if the exploration is not being played within the
# context of a collection.
collection_id = self.normalized_payload.get('collection_id')
user_id = self.user_id
event_services.CompleteExplorationEventHandler.record(
exploration_id,
self.normalized_payload.get('version'),
self.normalized_payload.get('state_name'),
self.normalized_payload.get('session_id'),
self.normalized_payload.get('client_time_spent_in_secs'),
self.normalized_payload.get('params'),
feconf.PLAY_TYPE_NORMAL)
if user_id:
learner_progress_services.mark_exploration_as_completed(
user_id, exploration_id)
if user_id and collection_id:
collection_services.record_played_exploration_in_collection_context(
user_id, collection_id, exploration_id)
next_exp_id_to_complete = (
collection_services.get_next_exploration_id_to_complete_by_user( # pylint: disable=line-too-long
user_id, collection_id))
if not next_exp_id_to_complete:
learner_progress_services.mark_collection_as_completed(
user_id, collection_id)
else:
learner_progress_services.mark_collection_as_incomplete(
user_id, collection_id)
self.render_json(self.values)
class ExplorationMaybeLeaveHandler(base.BaseHandler):
"""Tracks a learner leaving an exploration without completing it.
The state name recorded should be a state with a non-terminal interaction.
"""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': editor.SCHEMA_FOR_EXPLORATION_ID
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'version': {
'schema': editor.SCHEMA_FOR_VERSION
},
'state_name': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'has_length_at_most',
'max_value': constants.MAX_STATE_NAME_LENGTH
}]
}
},
'collection_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
},
'default_value': None
},
'session_id': {
'schema': {
'type': 'basestring'
}
},
'client_time_spent_in_secs': {
'schema': {
'type': 'float',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}]
}
},
'params': {
'schema': {
'type': 'object_dict',
'validation_method': (
domain_objects_validator.validate_params_dict),
}
}
}
}
REQUIRE_PAYLOAD_CSRF_CHECK = False
@acl_decorators.can_play_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
version = self.normalized_payload.get('version')
state_name = self.normalized_payload.get('state_name')
user_id = self.user_id
collection_id = self.normalized_payload.get('collection_id')
story_id = exp_services.get_story_id_linked_to_exploration(
exploration_id)
if user_id:
learner_progress_services.mark_exploration_as_incomplete(
user_id, exploration_id, state_name, version)
if user_id and collection_id:
learner_progress_services.mark_collection_as_incomplete(
user_id, collection_id)
if user_id and story_id:
story = story_fetchers.get_story_by_id(story_id)
if story is not None:
learner_progress_services.record_story_started(
user_id, story.id)
if story.corresponding_topic_id is not None:
learner_progress_services.record_topic_started(
user_id, story.corresponding_topic_id)
else:
logging.error(
'Could not find a story corresponding to %s '
'id.' % story_id)
self.render_json({})
return
event_services.MaybeLeaveExplorationEventHandler.record(
exploration_id,
version,
state_name,
self.normalized_payload.get('session_id'),
self.normalized_payload.get('client_time_spent_in_secs'),
self.normalized_payload.get('params'),
feconf.PLAY_TYPE_NORMAL)
self.render_json(self.values)
class LearnerIncompleteActivityHandler(base.BaseHandler):
"""Handles operations related to the activities in the incomplete list of
the user.
"""
URL_PATH_ARGS_SCHEMAS = {
'activity_type': {
'schema': {
'type': 'basestring',
'choices': [
constants.ACTIVITY_TYPE_EXPLORATION,
constants.ACTIVITY_TYPE_COLLECTION,
constants.ACTIVITY_TYPE_STORY,
constants.ACTIVITY_TYPE_LEARN_TOPIC
]
}
},
'activity_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'DELETE': {}
}
@acl_decorators.can_access_learner_dashboard
def delete(self, activity_type, activity_id):
"""Removes exploration, collection, story or topic from incomplete
list.
Args:
activity_type: str. The activity type. Currently, it can take values
"exploration", "collection", "story" or "topic".
activity_id: str. The ID of the activity to be deleted.
"""
if activity_type == constants.ACTIVITY_TYPE_EXPLORATION:
learner_progress_services.remove_exp_from_incomplete_list(
self.user_id, activity_id)
elif activity_type == constants.ACTIVITY_TYPE_COLLECTION:
learner_progress_services.remove_collection_from_incomplete_list(
self.user_id, activity_id)
elif activity_type == constants.ACTIVITY_TYPE_STORY:
learner_progress_services.remove_story_from_incomplete_list(
self.user_id, activity_id)
elif activity_type == constants.ACTIVITY_TYPE_LEARN_TOPIC:
learner_progress_services.remove_topic_from_partially_learnt_list(
self.user_id, activity_id)
self.render_json(self.values)
class RatingHandler(base.BaseHandler):
"""Records the rating of an exploration submitted by a user.
Note that this represents ratings submitted on completion of the
exploration.
"""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'GET': {},
'PUT': {
'user_rating': {
'schema': {
'type': 'int',
'choices': [1, 2, 3, 4, 5]
}
}
}
}
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Handles GET requests."""
self.values.update({
'overall_ratings':
rating_services.get_overall_ratings_for_exploration(
exploration_id),
'user_rating': (
rating_services.get_user_specific_rating_for_exploration(
self.user_id, exploration_id) if self.user_id else None)
})
self.render_json(self.values)
@acl_decorators.can_rate_exploration
def put(self, exploration_id):
"""Handles PUT requests for submitting ratings at the end of an
exploration.
"""
user_rating = self.normalized_payload.get('user_rating')
rating_services.assign_rating_to_exploration(
self.user_id, exploration_id, user_rating)
self.render_json({})
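# Illustrative PUT payload for RatingHandler above: the schema restricts
# 'user_rating' to the integers 1-5, so e.g.
#
#   {"user_rating": 4}
#
# is accepted, while 0 or 6 would be rejected by schema validation.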
class RecommendationsHandler(base.BaseHandler):
"""Provides recommendations to be displayed at the end of explorations.
Which explorations are provided depends on whether the exploration was
played within the context of a collection and whether the user is logged in.
If both are true, then the explorations are suggested from the collection,
if there are upcoming explorations for the learner to complete.
"""
# TODO(bhenning): Move the recommendation selection logic & related tests
# to the domain layer as service methods or to the frontend to reduce the
# amount of logic needed in this handler.
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_play_exploration
def get(self, exploration_id):
"""Handles GET requests."""
collection_id = self.request.get('collection_id')
include_system_recommendations = self.request.get(
'include_system_recommendations')
try:
author_recommended_exp_ids = json.loads(self.request.get(
'stringified_author_recommended_ids'))
except Exception:
raise self.PageNotFoundException
system_recommended_exp_ids = []
next_exp_id = None
if collection_id:
if self.user_id:
next_exp_id = (
collection_services.get_next_exploration_id_to_complete_by_user( # pylint: disable=line-too-long
self.user_id, collection_id))
else:
collection = collection_services.get_collection_by_id(
collection_id)
next_exp_id = (
collection.get_next_exploration_id_in_sequence(
exploration_id))
elif include_system_recommendations:
system_chosen_exp_ids = (
recommendations_services.get_exploration_recommendations(
exploration_id))
filtered_exp_ids = list(
set(system_chosen_exp_ids) - set(author_recommended_exp_ids))
system_recommended_exp_ids = random.sample(
filtered_exp_ids,
min(MAX_SYSTEM_RECOMMENDATIONS, len(filtered_exp_ids)))
recommended_exp_ids = set(
author_recommended_exp_ids + system_recommended_exp_ids)
if next_exp_id is not None:
recommended_exp_ids.add(next_exp_id)
self.values.update({
'summaries': (
summary_services.get_displayable_exp_summary_dicts_matching_ids(
recommended_exp_ids)),
})
self.render_json(self.values)
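# Illustrative GET query for RecommendationsHandler above (parameter names are
# the ones read via self.request.get(); the ids and URL encoding are assumed):
#
#   ?collection_id=col1&include_system_recommendations=true
#       &stringified_author_recommended_ids=["exp2","exp3"]
#
# The response merges author-recommended and system-recommended explorations,
# plus the next exploration in the collection if one exists.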
class FlagExplorationHandler(base.BaseHandler):
"""Handles operations relating to learner flagging of explorations."""
URL_PATH_ARGS_SCHEMAS = {
'exploration_id': {
'schema': {
'type': 'basestring',
'validators': [{
'id': 'is_regex_matched',
'regex_pattern': constants.ENTITY_ID_REGEX
}]
}
}
}
HANDLER_ARGS_SCHEMAS = {
'POST': {
'report_text': {
'schema': {
'type': 'basestring'
}
}
}
}
@acl_decorators.can_flag_exploration
def post(self, exploration_id):
"""Handles POST requests.
Args:
exploration_id: str. The ID of the exploration.
"""
moderator_services.enqueue_flag_exploration_email_task(
exploration_id,
self.normalized_payload.get('report_text'),
self.user_id)
self.render_json(self.values)
class QuestionPlayerHandler(base.BaseHandler):
"""Provides questions with given skill ids."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET request."""
# Skill ids are given as a comma separated list because this is
# a GET request.
skill_ids = self.request.get('skill_ids').split(',')
question_count = self.request.get('question_count')
fetch_by_difficulty_value = self.request.get('fetch_by_difficulty')
if not question_count.isdigit() or int(question_count) <= 0:
raise self.InvalidInputException(
'Question count has to be greater than 0')
if fetch_by_difficulty_value not in ('true', 'false'):
raise self.InvalidInputException(
'fetch_by_difficulty must be true or false')
fetch_by_difficulty = (fetch_by_difficulty_value == 'true')
if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS:
skill_ids = skill_services.filter_skills_by_mastery(
self.user_id, skill_ids)
questions = (
question_services.get_questions_by_skill_ids(
int(question_count), skill_ids, fetch_by_difficulty)
)
random.shuffle(questions)
question_dicts = [question.to_dict() for question in questions]
self.values.update({
'question_dicts': question_dicts[:feconf.QUESTION_BATCH_SIZE]
})
self.render_json(self.values)
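# Illustrative GET query for QuestionPlayerHandler above (parameter names are
# the ones read via self.request.get(); the skill ids are hypothetical):
#
#   ?skill_ids=skill1,skill2&question_count=5&fetch_by_difficulty=true
#
# At most feconf.QUESTION_BATCH_SIZE shuffled question dicts are returned.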
class LearnerAnswerDetailsSubmissionHandler(base.BaseHandler):
"""Handles the learner answer details submission."""
@acl_decorators.can_play_entity
def put(self, entity_type, entity_id):
""""Handles the PUT requests. Stores the answer details submitted
by the learner.
"""
if not constants.ENABLE_SOLICIT_ANSWER_DETAILS_FEATURE:
raise self.PageNotFoundException
interaction_id = self.payload.get('interaction_id')
if entity_type == feconf.ENTITY_TYPE_EXPLORATION:
state_name = self.payload.get('state_name')
state_reference = (
stats_services.get_state_reference_for_exploration(
entity_id, state_name))
if interaction_id != exp_services.get_interaction_id_for_state(
entity_id, state_name):
raise utils.InvalidInputException(
'Interaction id given does not match with the '
'interaction id of the state')
elif entity_type == feconf.ENTITY_TYPE_QUESTION:
state_reference = (
stats_services.get_state_reference_for_question(entity_id))
if interaction_id != (
question_services.get_interaction_id_for_question(
entity_id)):
raise utils.InvalidInputException(
'Interaction id given does not match with the '
'interaction id of the question')
answer = self.payload.get('answer')
answer_details = self.payload.get('answer_details')
stats_services.record_learner_answer_info(
entity_type, state_reference,
interaction_id, answer, answer_details)
self.render_json({})
| |
# coding=utf-8
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from bson.code import Code
import os
import unittest
from xml.dom.minidom import parse
import pymongo
from trnltk.ngrams.ngramgenerator import WordNGramGenerator, WordUnigramWithParseResultGenerator
from trnltk.parseset.xmlbindings import ParseSetBinding, UnparsableWordBinding
def _count_distinct_ngrams(collection, keys, filter_criteria):
mapper = Code("""
function(){
emit({
""" + keys + """
}, {count: 1});
}
""")
reducer = Code("""
function(key,values){
var total = 0;
for (var i = 0; i < values.length; i++) {
total += values[i].count
}
return {count:total};
}
""")
result = collection.map_reduce(mapper, reducer, "_temporary")
if filter_criteria:
result = result.find(filter_criteria)
return result.count()
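# Illustrative call (not part of the original tests): ``keys`` is spliced
# verbatim into the JavaScript mapper, so the snippet below groups unigrams by
# surface value and syntactic category and counts the groups that occur more
# than once. The collection and field names mirror the ones used in the tests.
#
#   _count_distinct_ngrams(
#       db['wordUnigrams001'],
#       "a:this.item_0.word.surface.value, b:this.item_0.word.surface.syntactic_category",
#       {"value.count": {"$gt": 1}})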
class WordUnigramMongodbGeneratorTest(unittest.TestCase):
BULK_INSERT_SIZE = 500
@classmethod
def setUpClass(cls):
super(WordUnigramMongodbGeneratorTest, cls).setUpClass()
connection = pymongo.Connection(host="127.0.0.1")
cls.db = connection['trnltk']
def test_create_unigrams_for_parseset_001(self):
self._create_unigrams_for_parseset_n("001")
def test_create_unigrams_for_parseset_002(self):
self._create_unigrams_for_parseset_n("002")
def test_create_unigrams_for_parseset_003(self):
self._create_unigrams_for_parseset_n("003")
def test_create_unigrams_for_parseset_004(self):
self._create_unigrams_for_parseset_n("004")
def test_create_unigrams_for_parseset_005(self):
self._create_unigrams_for_parseset_n("005")
def test_create_unigrams_for_parseset_999(self):
self._create_unigrams_for_parseset_n("999")
def test_inspect_unigrams_for_parseset_001(self):
self._inspect_unigrams_for_parseset_n("001")
def test_inspect_unigrams_for_parseset_002(self):
self._inspect_unigrams_for_parseset_n("002")
def test_inspect_unigrams_for_parseset_003(self):
self._inspect_unigrams_for_parseset_n("003")
def test_inspect_unigrams_for_parseset_004(self):
self._inspect_unigrams_for_parseset_n("004")
def test_inspect_unigrams_for_parseset_005(self):
self._inspect_unigrams_for_parseset_n("005")
def test_inspect_unigrams_for_parseset_999(self):
self._inspect_unigrams_for_parseset_n("999")
def _create_unigrams_for_parseset_n(self, parseset_index):
print "Parsing parse set {} and generating unigrams with occurrence counts".format(parseset_index)
dom = parse(os.path.join(os.path.dirname(__file__), '../../testresources/parsesets/parseset{}.xml'.format(parseset_index)))
parseset = ParseSetBinding.build(dom.getElementsByTagName("parseset")[0])
print "Found {} sentences".format(len(parseset.sentences))
words = [word for sentence in parseset.sentences for word in sentence.words]
print "Found {} words".format(len(words))
print "Found {} parsable words".format(
len(filter(lambda word: not isinstance(word, UnparsableWordBinding), words)))
generator = WordNGramGenerator(1)
collection = self.db['wordUnigrams{}'.format(parseset_index)]
# delete everything in the collection
collection.remove({})
bulk_insert_buffer = []
for unigram in generator.iter_ngrams(words):
entity = {
'item_0': unigram
}
bulk_insert_buffer.append(entity)
if len(bulk_insert_buffer) % self.BULK_INSERT_SIZE == 0:
collection.insert(bulk_insert_buffer)
bulk_insert_buffer = []
        if bulk_insert_buffer:  # avoid inserting an empty final batch
            collection.insert(bulk_insert_buffer)
self._inspect_unigrams_for_parseset_n(parseset_index)
def _inspect_unigrams_for_parseset_n(self, parseset_index):
collection = self.db['wordUnigrams{}'.format(parseset_index)]
unigram_count = collection.count()
print "Found {} unigrams".format(unigram_count)
distinct_surface_unigram_count = self._count_distinct_surface_unigrams(collection)
print "Found {} distinct surface unigrams".format(distinct_surface_unigram_count)
distinct_surface_unigram_with_multiple_occurrences_count = self._count_distinct_surface_unigrams_with_multiple_occurrences(collection)
print "Found {} distinct surface unigrams with multiple occurrences".format(distinct_surface_unigram_with_multiple_occurrences_count)
distinct_stem_unigram_count = self._count_distinct_stem_unigrams(collection)
print "Found {} distinct stem unigrams".format(distinct_stem_unigram_count)
distinct_stem_unigram_with_multiple_occurrences_count = self._count_distinct_stem_unigrams_with_multiple_occurrences(collection)
print "Found {} distinct stem unigrams with multiple occurrences".format(distinct_stem_unigram_with_multiple_occurrences_count)
distinct_lexeme_unigram_count = self._count_distinct_lexeme_unigrams(collection)
print "Found {} distinct lexeme unigrams".format(distinct_lexeme_unigram_count)
distinct_lexeme_unigram_with_multiple_occurrences_count = self._count_distinct_lexeme_unigrams_with_multiple_occurrences(collection)
print "Found {} distinct lexeme unigrams with multiple occurrences".format(distinct_lexeme_unigram_with_multiple_occurrences_count)
@classmethod
def _count_distinct_surface_unigrams(cls, collection):
keys = "a:this.item_0.word.surface.value, b:this.item_0.word.surface.syntactic_category"
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_surface_unigrams_with_multiple_occurrences(cls, collection):
keys = "a:this.item_0.word.surface.value, b:this.item_0.word.surface.syntactic_category"
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_stem_unigrams(cls, collection):
keys = "a:this.item_0.word.stem.value, b:this.item_0.word.stem.syntactic_category"
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_stem_unigrams_with_multiple_occurrences(cls, collection):
keys = "a:this.item_0.word.stem.value, b:this.item_0.word.stem.syntactic_category"
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_lexeme_unigrams(cls, collection):
keys = "a:this.item_0.word.lemma_root.value, b:this.item_0.word.lemma_root.syntactic_category"
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_lexeme_unigrams_with_multiple_occurrences(cls, collection):
keys = "a:this.item_0.word.lemma_root.value, b:this.item_0.word.lemma_root.syntactic_category"
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
class WordBigramMongodbGeneratorTest(unittest.TestCase):
BULK_INSERT_SIZE = 500
@classmethod
def setUpClass(cls):
super(WordBigramMongodbGeneratorTest, cls).setUpClass()
connection = pymongo.Connection(host="127.0.0.1")
cls.db = connection['trnltk']
def test_create_bigrams_for_parseset_001(self):
self._create_bigrams_for_parseset_n("001")
def test_create_bigrams_for_parseset_002(self):
self._create_bigrams_for_parseset_n("002")
def test_create_bigrams_for_parseset_003(self):
self._create_bigrams_for_parseset_n("003")
def test_create_bigrams_for_parseset_004(self):
self._create_bigrams_for_parseset_n("004")
def test_create_bigrams_for_parseset_005(self):
self._create_bigrams_for_parseset_n("005")
def test_create_bigrams_for_parseset_999(self):
self._create_bigrams_for_parseset_n("999")
def test_inspect_bigrams_for_parseset_001(self):
self._inspect_bigrams_for_parseset_n("001")
def test_inspect_bigrams_for_parseset_002(self):
self._inspect_bigrams_for_parseset_n("002")
def test_inspect_bigrams_for_parseset_003(self):
self._inspect_bigrams_for_parseset_n("003")
def test_inspect_bigrams_for_parseset_004(self):
self._inspect_bigrams_for_parseset_n("004")
def test_inspect_bigrams_for_parseset_005(self):
self._inspect_bigrams_for_parseset_n("005")
def test_inspect_bigrams_for_parseset_999(self):
self._inspect_bigrams_for_parseset_n("999")
def _create_bigrams_for_parseset_n(self, parseset_index):
print "Parsing parse set {} and generating bigrams with occurrence counts".format(parseset_index)
dom = parse(os.path.join(os.path.dirname(__file__), '../../testresources/parsesets/parseset{}.xml'.format(parseset_index)))
parseset = ParseSetBinding.build(dom.getElementsByTagName("parseset")[0])
print "Found {} sentences".format(len(parseset.sentences))
words = [word for sentence in parseset.sentences for word in sentence.words]
print "Found {} words".format(len(words))
print "Found {} parsable words".format(
len(filter(lambda word: not isinstance(word, UnparsableWordBinding), words)))
generator = WordNGramGenerator(2)
collection = self.db['wordBigrams{}'.format(parseset_index)]
# delete everything in the collection
collection.remove({})
bulk_insert_buffer = []
for bigram in generator.iter_ngrams(words):
entity = {
'item_0': bigram[0],
'item_1': bigram[1]
}
bulk_insert_buffer.append(entity)
if len(bulk_insert_buffer) % self.BULK_INSERT_SIZE == 0:
collection.insert(bulk_insert_buffer)
bulk_insert_buffer = []
        if bulk_insert_buffer:  # avoid inserting an empty final batch
            collection.insert(bulk_insert_buffer)
self._inspect_bigrams_for_parseset_n(parseset_index)
def _inspect_bigrams_for_parseset_n(self, parseset_index):
collection = self.db['wordBigrams{}'.format(parseset_index)]
bigram_count = collection.count()
print "Found {} bigrams".format(bigram_count)
print "Found {} distinct surface-surface bigrams".format(self._calculate_distinct_surface_surface_bigrams(collection))
print "Found {} distinct surface-surface bigrams with multiple occurrences".format(self._calculate_distinct_surface_surface_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct surface-stem bigrams".format(self._calculate_distinct_surface_stem_bigrams(collection))
print "Found {} distinct surface-stem bigrams with multiple occurrences".format(self._calculate_distinct_surface_stem_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct surface-lexeme bigrams".format(self._calculate_distinct_surface_lexeme_bigrams(collection))
print "Found {} distinct surface-lexeme bigrams with multiple occurrences".format(self._calculate_distinct_surface_lexeme_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct stem-surface bigrams".format(self._calculate_distinct_stem_surface_bigrams(collection))
print "Found {} distinct stem-surface bigrams with multiple occurrences".format(self._calculate_distinct_stem_surface_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct stem-stem bigrams".format(self._calculate_distinct_stem_stem_bigrams(collection))
print "Found {} distinct stem-stem bigrams with multiple occurrences".format(self._calculate_distinct_stem_stem_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct stem-lexeme bigrams".format(self._calculate_distinct_stem_lexeme_bigrams(collection))
print "Found {} distinct stem-lexeme bigrams with multiple occurrences".format(self._calculate_distinct_stem_lexeme_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct lexeme-surface bigrams".format(self._calculate_distinct_lexeme_surface_bigrams(collection))
print "Found {} distinct lexeme-surface bigrams with multiple occurrences".format(self._calculate_distinct_lexeme_surface_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct lexeme-stem bigrams".format(self._calculate_distinct_lexeme_stem_bigrams(collection))
print "Found {} distinct lexeme-stem bigrams with multiple occurrences".format(self._calculate_distinct_lexeme_stem_bigrams_with_multiple_occurrences(collection))
print "Found {} distinct lexeme-lexeme bigrams".format(self._calculate_distinct_lexeme_lexeme_bigrams(collection))
print "Found {} distinct lexeme-lexeme bigrams with multiple occurrences".format(self._calculate_distinct_lexeme_lexeme_bigrams_with_multiple_occurrences(collection))
####################################################################
@classmethod
def _calculate_distinct_surface_surface_bigrams(cls, collection):
keys = """
a:this.item_0.word.surface.value, b:this.item_1.word.surface.value,
c:this.item_0.word.surface.syntactic_category, d:this.item_1.word.surface.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_surface_surface_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.surface.value, b:this.item_1.word.surface.value,
c:this.item_0.word.surface.syntactic_category, d:this.item_1.word.surface.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_surface_stem_bigrams(cls, collection):
keys = """
a:this.item_0.word.surface.value, b:this.item_1.word.stem.value,
c:this.item_0.word.surface.syntactic_category, d:this.item_1.word.stem.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_surface_stem_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.surface.value, b:this.item_1.word.stem.value,
c:this.item_0.word.surface.syntactic_category, d:this.item_1.word.stem.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_surface_lexeme_bigrams(cls, collection):
keys = """
a:this.item_0.word.surface.value, b:this.item_1.word.lemma_root.value,
c:this.item_0.word.surface.syntactic_category, d:this.item_1.word.lemma_root.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_surface_lexeme_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.surface.value, b:this.item_1.word.lemma_root.value,
c:this.item_0.word.surface.syntactic_category, d:this.item_1.word.lemma_root.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
####################################################################
@classmethod
def _calculate_distinct_stem_surface_bigrams(cls, collection):
keys = """
a:this.item_0.word.stem.value, b:this.item_1.word.surface.value,
c:this.item_0.word.stem.syntactic_category, d:this.item_1.word.surface.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_stem_surface_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.stem.value, b:this.item_1.word.surface.value,
c:this.item_0.word.stem.syntactic_category, d:this.item_1.word.surface.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_stem_stem_bigrams(cls, collection):
keys = """
a:this.item_0.word.stem.value, b:this.item_1.word.stem.value,
c:this.item_0.word.stem.syntactic_category, d:this.item_1.word.stem.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_stem_stem_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.stem.value, b:this.item_1.word.stem.value,
c:this.item_0.word.stem.syntactic_category, d:this.item_1.word.stem.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_stem_lexeme_bigrams(cls, collection):
keys = """
a:this.item_0.word.stem.value, b:this.item_1.word.lemma_root.value,
c:this.item_0.word.stem.syntactic_category, d:this.item_1.word.lemma_root.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_stem_lexeme_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.stem.value, b:this.item_1.word.lemma_root.value,
c:this.item_0.word.stem.syntactic_category, d:this.item_1.word.lemma_root.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
####################################################################
@classmethod
def _calculate_distinct_lexeme_surface_bigrams(cls, collection):
keys = """
a:this.item_0.word.lemma_root.value, b:this.item_1.word.surface.value,
c:this.item_0.word.lemma_root.syntactic_category, d:this.item_1.word.surface.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_lexeme_surface_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.lemma_root.value, b:this.item_1.word.surface.value,
c:this.item_0.word.lemma_root.syntactic_category, d:this.item_1.word.surface.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_lexeme_stem_bigrams(cls, collection):
keys = """
a:this.item_0.word.lemma_root.value, b:this.item_1.word.stem.value,
c:this.item_0.word.lemma_root.syntactic_category, d:this.item_1.word.stem.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_lexeme_stem_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.lemma_root.value, b:this.item_1.word.stem.value,
c:this.item_0.word.lemma_root.syntactic_category, d:this.item_1.word.stem.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_lexeme_lexeme_bigrams(cls, collection):
keys = """
a:this.item_0.word.lemma_root.value, b:this.item_1.word.lemma_root.value,
c:this.item_0.word.lemma_root.syntactic_category, d:this.item_1.word.lemma_root.syntactic_category
"""
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _calculate_distinct_lexeme_lexeme_bigrams_with_multiple_occurrences(cls, collection):
keys = """
a:this.item_0.word.lemma_root.value, b:this.item_1.word.lemma_root.value,
c:this.item_0.word.lemma_root.syntactic_category, d:this.item_1.word.lemma_root.syntactic_category
"""
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
class WordTrigramMongodbGeneratorTest(unittest.TestCase):
BULK_INSERT_SIZE = 500
@classmethod
def setUpClass(cls):
super(WordTrigramMongodbGeneratorTest, cls).setUpClass()
connection = pymongo.Connection(host="127.0.0.1")
cls.db = connection['trnltk']
def test_create_trigrams_for_parseset_001(self):
self._create_trigrams_for_parseset_n("001")
def test_create_trigrams_for_parseset_002(self):
self._create_trigrams_for_parseset_n("002")
def test_create_trigrams_for_parseset_003(self):
self._create_trigrams_for_parseset_n("003")
def test_create_trigrams_for_parseset_004(self):
self._create_trigrams_for_parseset_n("004")
def test_create_trigrams_for_parseset_005(self):
self._create_trigrams_for_parseset_n("005")
def test_create_trigrams_for_parseset_999(self):
self._create_trigrams_for_parseset_n("999")
def _create_trigrams_for_parseset_n(self, parseset_index):
print "Parsing parse set {} and generating trigrams with occurrence counts".format(parseset_index)
dom = parse(os.path.join(os.path.dirname(__file__), '../../testresources/parsesets/parseset{}.xml'.format(parseset_index)))
parseset = ParseSetBinding.build(dom.getElementsByTagName("parseset")[0])
print "Found {} sentences".format(len(parseset.sentences))
words = [word for sentence in parseset.sentences for word in sentence.words]
print "Found {} words".format(len(words))
print "Found {} parsable words".format(
len(filter(lambda word: not isinstance(word, UnparsableWordBinding), words)))
generator = WordNGramGenerator(3)
collection = self.db['wordTrigrams{}'.format(parseset_index)]
# delete everything in the collection
collection.remove({})
bulk_insert_buffer = []
for trigram in generator.iter_ngrams(words):
entity = {
'item_0': trigram[0],
'item_1': trigram[1],
'item_2': trigram[2]
}
bulk_insert_buffer.append(entity)
if len(bulk_insert_buffer) % self.BULK_INSERT_SIZE == 0:
collection.insert(bulk_insert_buffer)
bulk_insert_buffer = []
        if bulk_insert_buffer:  # avoid inserting an empty final batch
            collection.insert(bulk_insert_buffer)
trigram_count = collection.count()
print "Generated {} trigrams".format(trigram_count)
class WordUnigramWithParseResultGeneratorMongodbTest(unittest.TestCase):
BULK_INSERT_SIZE = 500
@classmethod
def setUpClass(cls):
super(WordUnigramWithParseResultGeneratorMongodbTest, cls).setUpClass()
connection = pymongo.Connection(host="127.0.0.1")
cls.db = connection['trnltk']
def test_create_unigrams_for_parseset_001(self):
self._create_unigrams_for_parseset_n("001")
def test_create_unigrams_for_parseset_002(self):
self._create_unigrams_for_parseset_n("002")
def test_create_unigrams_for_parseset_003(self):
self._create_unigrams_for_parseset_n("003")
def test_create_unigrams_for_parseset_004(self):
self._create_unigrams_for_parseset_n("004")
def test_create_unigrams_for_parseset_005(self):
self._create_unigrams_for_parseset_n("005")
def test_create_unigrams_for_parseset_999(self):
self._create_unigrams_for_parseset_n("999")
def test_inspect_unigrams_for_parseset_001(self):
self._inspect_unigrams_for_parseset_n("001")
def test_inspect_unigrams_for_parseset_002(self):
self._inspect_unigrams_for_parseset_n("002")
def test_inspect_unigrams_for_parseset_003(self):
self._inspect_unigrams_for_parseset_n("003")
def test_inspect_unigrams_for_parseset_004(self):
self._inspect_unigrams_for_parseset_n("004")
def test_inspect_unigrams_for_parseset_005(self):
self._inspect_unigrams_for_parseset_n("005")
def test_inspect_unigrams_for_parseset_999(self):
self._inspect_unigrams_for_parseset_n("999")
def _create_unigrams_for_parseset_n(self, parseset_index):
print "Parsing parse set {} and generating unigrams with occurrence counts and parse results".format(parseset_index)
dom = parse(os.path.join(os.path.dirname(__file__), '../../testresources/parsesets/parseset{}.xml'.format(parseset_index)))
parseset = ParseSetBinding.build(dom.getElementsByTagName("parseset")[0])
print "Found {} sentences".format(len(parseset.sentences))
words = [word for sentence in parseset.sentences for word in sentence.words]
print "Found {} words".format(len(words))
print "Found {} parsable words".format(
len(filter(lambda word: not isinstance(word, UnparsableWordBinding), words)))
generator = WordUnigramWithParseResultGenerator()
collection = self.db['wordUnigrams{}'.format(parseset_index)]
# delete everything in the collection
collection.remove({})
bulk_insert_buffer = []
for unigram in generator.iter_ngrams(words):
entity = {
'item_0': unigram
}
bulk_insert_buffer.append(entity)
if len(bulk_insert_buffer) % self.BULK_INSERT_SIZE == 0:
collection.insert(bulk_insert_buffer)
bulk_insert_buffer = []
        if bulk_insert_buffer:  # avoid inserting an empty final batch
            collection.insert(bulk_insert_buffer)
self._inspect_unigrams_for_parseset_n(parseset_index)
def _inspect_unigrams_for_parseset_n(self, parseset_index):
collection = self.db['wordUnigrams{}'.format(parseset_index)]
unigram_count = collection.count()
print "Found {} unigrams".format(unigram_count)
distinct_surface_unigram_count = self._count_distinct_surface_unigrams(collection)
print "Found {} distinct surface unigrams".format(distinct_surface_unigram_count)
distinct_surface_unigram_with_multiple_occurrences_count = self._count_distinct_surface_unigrams_with_multiple_occurrences(collection)
print "Found {} distinct surface unigrams with multiple occurrences".format(distinct_surface_unigram_with_multiple_occurrences_count)
distinct_stem_unigram_count = self._count_distinct_stem_unigrams(collection)
print "Found {} distinct stem unigrams".format(distinct_stem_unigram_count)
distinct_stem_unigram_with_multiple_occurrences_count = self._count_distinct_stem_unigrams_with_multiple_occurrences(collection)
print "Found {} distinct stem unigrams with multiple occurrences".format(distinct_stem_unigram_with_multiple_occurrences_count)
distinct_lexeme_unigram_count = self._count_distinct_lexeme_unigrams(collection)
print "Found {} distinct lexeme unigrams".format(distinct_lexeme_unigram_count)
distinct_lexeme_unigram_with_multiple_occurrences_count = self._count_distinct_lexeme_unigrams_with_multiple_occurrences(collection)
print "Found {} distinct lexeme unigrams with multiple occurrences".format(distinct_lexeme_unigram_with_multiple_occurrences_count)
@classmethod
def _count_distinct_surface_unigrams(cls, collection):
keys = "a:this.item_0.word.surface.value, b:this.item_0.word.surface.syntactic_category"
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_surface_unigrams_with_multiple_occurrences(cls, collection):
keys = "a:this.item_0.word.surface.value, b:this.item_0.word.surface.syntactic_category"
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_stem_unigrams(cls, collection):
keys = "a:this.item_0.word.stem.value, b:this.item_0.word.stem.syntactic_category"
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_stem_unigrams_with_multiple_occurrences(cls, collection):
keys = "a:this.item_0.word.stem.value, b:this.item_0.word.stem.syntactic_category"
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_lexeme_unigrams(cls, collection):
keys = "a:this.item_0.word.lemma_root.value, b:this.item_0.word.lemma_root.syntactic_category"
filter_criteria = None
return _count_distinct_ngrams(collection, keys, filter_criteria)
@classmethod
def _count_distinct_lexeme_unigrams_with_multiple_occurrences(cls, collection):
keys = "a:this.item_0.word.lemma_root.value, b:this.item_0.word.lemma_root.syntactic_category"
filter_criteria = {"value.count": {"$gt": 1}}
return _count_distinct_ngrams(collection, keys, filter_criteria)
if __name__ == '__main__':
unittest.main()
| |
"""
Definition of the Session class.
"""
import re
import sys
import time
import json
import base64
import random
import hashlib
import asyncio
import weakref
import datetime
from http.cookies import SimpleCookie
from ..event._component import new_type
from ._component2 import PyComponent, JsComponent, AppComponentMeta
from ._asset import Asset, Bundle, solve_dependencies
from ._assetstore import AssetStore, INDEX
from ._assetstore import assets as assetstore
from ._clientcore import serializer
from . import logger
from .. import config
reprs = json.dumps
# Use the system PRNG for session id generation (if possible)
# NOTE: secure random string generation implementation is adapted
# from the Django project.
def get_random_string(length=24, allowed_chars=None):
""" Produce a securely generated random string.
    A length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value: log_2((26+26+10)^12) =~ 71 bits.
"""
allowed_chars = allowed_chars or ('abcdefghijklmnopqrstuvwxyz' +
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
try:
srandom = random.SystemRandom()
except NotImplementedError: # pragma: no cover
srandom = random
logger.warning('Falling back to less secure Mersenne Twister random string.')
bogus = "%s%s%s" % (random.getstate(), time.time(), 'sdkhfbsdkfbsdbhf')
random.seed(hashlib.sha256(bogus.encode()).digest())
return ''.join(srandom.choice(allowed_chars) for i in range(length))
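# Rough entropy sketch (illustrative only): the default 62-character alphabet
# gives log2(62) ~ 5.95 bits per character, so the default length of 24 yields
# roughly 143 bits, and a length of 12 the ~71 bits quoted in the docstring.
#
#   import math
#   math.log2(62 ** 24)   # ~142.9
#   math.log2(62 ** 12)   # ~71.45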
class Session:
""" A connection between Python and the client runtime (JavaScript).
The session is what holds together the app widget, the web runtime,
and the websocket instance that connects to it.
Responsibilities:
* Send messages to the client and process messages received by the client.
* Keep track of PyComponent instances used by the session.
* Keep track of JsComponent instances associated with the session.
* Ensure that the client has all the module definitions it needs.
"""
STATUS = new_type('Enum', (), {'PENDING': 1, 'CONNECTED': 2, 'CLOSED': 0})
def __init__(self, app_name, store=None,
request=None): # Allow custom store for testing
self._store = store if (store is not None) else assetstore
assert isinstance(self._store, AssetStore)
self._creation_time = time.time() # used by app manager
# Id and name of the app
self._id = get_random_string()
self._app_name = app_name
# To keep track of what modules are defined at the client
self._present_classes = set() # Component classes known by the client
        self._present_modules = set() # module names defined at the client, plus deps
self._present_assets = set() # names of used associated assets
self._assets_to_ignore = set() # user settable
# Data for this session (in addition to the data provided by the store)
self._data = {}
# More vars
self._runtime = None # init web runtime, will be set when used
self._ws = None # init websocket, will be set when a connection is made
self._closing = False # Flag to help with shutdown
# PyComponent or JsComponent instance, can be None if app_name is __default__
self._component = None
# The session assigns component id's and keeps track of component objects
self._component_counter = 0
self._component_instances = weakref.WeakValueDictionary()
self._dead_component_ids = set()
# Keep track of roundtrips. The _ping_calls elements are:
# [ping_count, {objects}, *(callback, args)]
self._ping_calls = []
self._ping_counter = 0
self._eval_result = {}
self._eval_count = 0
# While the client is not connected, we keep a queue of
        # commands, which are sent to the client as soon as it connects
self._pending_commands = []
# request related information
self._request = request
if request and request.cookies:
cookies = request.cookies
else:
cookies = {}
self._set_cookies(cookies)
def __repr__(self):
t = '<%s for %r (%i) at 0x%x>'
return t % (self.__class__.__name__, self.app_name, self.status, id(self))
@property
def request(self):
"""The tornado request that was at the origin of this session.
"""
return self._request
@property
def id(self):
""" The unique identifier of this session.
"""
return self._id
@property
def app_name(self):
""" The name of the application that this session represents.
"""
return self._app_name
@property
def app(self):
""" The root PyComponent or JsComponent instance that represents the app.
"""
return self._component
@property
def runtime(self):
""" The runtime that is rendering this app instance. Can be
None if the client is a browser.
"""
return self._runtime
@property
def status(self):
""" The status of this session.
The lifecycle for each session is:
* status 1: pending
* status 2: connected
* status 0: closed
"""
if self._ws is None:
return self.STATUS.PENDING # not connected yet
elif self._ws.close_code is None:
return self.STATUS.CONNECTED # alive and kicking
else:
return self.STATUS.CLOSED # connection closed
@property
def present_modules(self):
""" The set of module names that is (currently) available at the client.
"""
return set(self._present_modules)
@property
def assets_to_ignore(self):
""" The set of names of assets that should *not* be pushed to
the client, e.g. because they are already present on the page.
Add names to this set to prevent them from being loaded.
"""
return self._assets_to_ignore
def close(self):
""" Close the session: close websocket, close runtime, dispose app.
"""
# Stop guarding objects to break down any circular refs
self._ping_calls = []
self._closing = True # suppress warnings for session being closed.
try:
# Close the websocket
if self._ws:
self._ws.close_this()
# Close the runtime
if self._runtime:
self._runtime.close()
# Dispose the component and break the circular reference
if self._component is not None:
self._component.dispose()
self._component = None
# Discard data
self._data = {}
finally:
self._closing = False
## Hooking up with app, websocket, runtime
def _set_ws(self, ws):
""" A session is always first created, so we know what page to
serve. The client will connect the websocket, and communicate
the session_id so it can be connected to the correct Session
via this method
"""
if self._ws is not None:
raise RuntimeError('Session is already connected.')
# Set websocket object - this is what changes the status to CONNECTED
self._ws = ws
self._ws.write_command(("PRINT", "Flexx session says hi"))
# Send pending commands
for command in self._pending_commands:
self._ws.write_command(command)
self._ws.write_command(('INIT_DONE', ))
def _set_cookies(self, cookies=None):
""" To set cookies, must be an http.cookie.SimpleCookie object.
When the app is loaded as a web app, the cookies are set *before* the
main component is instantiated. Otherwise they are set when the websocket
is connected.
"""
self._cookies = cookies if cookies else SimpleCookie()
def _set_runtime(self, runtime):
if self._runtime is not None:
raise RuntimeError('Session already has a runtime.')
self._runtime = runtime
## Cookies, mmm
def get_cookie(self, name, default=None, max_age_days=31, min_version=None):
""" Gets the value of the cookie with the given name, else default.
Note that cookies only really work for web apps.
"""
from tornado.web import decode_signed_value
if name in self._cookies:
value = self._cookies[name].value
value = decode_signed_value(config.cookie_secret,
name, value, max_age_days=max_age_days,
min_version=min_version)
return value.decode()
else:
return default
def set_cookie(self, name, value, expires_days=30, version=None,
domain=None, expires=None, path="/", **kwargs):
""" Sets the given cookie name/value with the given options. Set value
to None to clear. The cookie value is secured using
`flexx.config.cookie_secret`; don't forget to set that config
value in your server. Additional keyword arguments are set on
the Cookie.Morsel directly.
"""
# This code is taken (in modified form) from the Tornado project
# Copyright 2009 Facebook
# Licensed under the Apache License, Version 2.0
# Assume tornado is available ...
from tornado.escape import native_str
from tornado.httputil import format_timestamp
from tornado.web import create_signed_value
# Clear cookie?
if value is None:
value = ""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
else:
secret = config.cookie_secret
value = create_signed_value(secret, name, value, version=version,
key_version=None)
# The cookie library only accepts type str, in both python 2 and 3
name = native_str(name)
value = native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if name in self._cookies:
del self._cookies[name]
self._cookies[name] = value
morsel = self._cookies[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
self.send_command('EXEC', 'document.cookie = "%s";' %
morsel.OutputString().replace('"', '\\"'))
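    # Illustrative usage (cookie name and value are assumptions): requires
    # ``flexx.config.cookie_secret`` to be set on the server.
    #
    #   session.set_cookie('username', 'almar', expires_days=7)
    #   session.get_cookie('username', default='anonymous')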
## Data
def add_data(self, name, data):
""" Add data to serve to the client (e.g. images), specific to this
session. Returns the link at which the data can be retrieved.
Note that actions can be used to send (binary) data directly
to the client (over the websocket).
Parameters:
name (str): the name of the data, e.g. 'icon.png'. If data has
already been set on this name, it is overwritten.
data (bytes): the data blob.
Returns:
str: the (relative) url at which the data can be retrieved.
"""
if not isinstance(name, str):
raise TypeError('Session.add_data() name must be a str.')
if name in self._data:
raise ValueError('Session.add_data() got existing name %r.' % name)
if not isinstance(data, bytes):
raise TypeError('Session.add_data() data must be bytes.')
self._data[name] = data
return 'flexx/data/%s/%s' % (self.id, name) # relative path for export
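    # Illustrative usage (the file name is hypothetical): the returned relative
    # URL can then be used from the client, e.g. as the source of an image.
    #
    #   with open('icon.png', 'rb') as f:
    #       url = session.add_data('icon.png', f.read())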
def remove_data(self, name):
""" Remove the data associated with the given name. If you need this,
consider using actions instead. Note that data is automatically
released when the session is closed.
"""
self._data.pop(name, None)
def get_data_names(self):
""" Get a list of names of the data provided by this session.
"""
return list(self._data.keys())
def get_data(self, name):
""" Get the data corresponding to the given name. This can be
data local to the session, or global data. Returns None if data
by that name is unknown.
"""
        data = self._data.get(name, None)
        if data is None:
            data = self._store.get_data(name)
        return data
def _dump_data(self):
""" Get a dictionary that contains all data specific to this session.
The keys represent relative paths, the values are all bytes.
Private method, used by App.dump().
"""
d = {}
for fname in self.get_data_names():
d['flexx/data/{}/{}'.format(self.id, fname)] = self.get_data(fname)
return d
## Keeping track of component objects
def _register_component(self, component, id=None):
""" Called by PyComponent and JsComponent to give them an id
and register with the session.
"""
assert isinstance(component, (PyComponent, JsComponent))
assert component.session is self
cls = component.__class__
if self._component is None:
self._component = component # register root component (i.e. the app)
# Set id
if id is None:
self._component_counter += 1
id = cls.__name__ + '_' + str(self._component_counter)
component._id = id
component._uid = self.id + '_' + id
# Register the instance using a weakref
self._component_instances[component._id] = component
# Register the class to that the client has the needed definitions
self._register_component_class(cls)
self.keep_alive(component)
def _unregister_component(self, component):
self._dead_component_ids.add(component.id)
# self.keep_alive(component) # does not work on pypy; deletion in final
# Because we use weak refs, and we want to be able to keep (the id of)
# the object so that INVOKE on it can be silently ignored (because it
# is disposed). The object id gets removed by the DISPOSE_ACK command.
def get_component_instance(self, id):
""" Get PyComponent or JsComponent instance that is associated with
this session and has the corresponding id. The returned value can be
None if it does not exist, and a returned component can be disposed.
"""
return self._component_instances.get(id, None)
## JIT asset definitions
def _register_component_class(self, cls):
""" Mark the given PyComponent or JsComponent class as used; ensure
that the client knows about the module that it is defined in,
dependencies of this module, and associated assets of any of these
modules.
"""
if not (isinstance(cls, type) and issubclass(cls, (PyComponent, JsComponent))):
raise TypeError('_register_component_class() needs a PyComponent '
'or JsComponent class')
# Early exit if we know the class already
if cls in self._present_classes:
return
# Make sure that no two Component classes have the same name, or we get problems
# that are difficult to debug. Unless classes are defined interactively.
# The modules of classes that are re-registered are re-defined. The base
# class of such a component is assumed to be either unchanged or defined
# in the same module. It can also happen that a class is registered for
# which the module was defined earlier (e.g. ui.html). Such modules
# are redefined as well.
same_name = [c for c in self._present_classes if c.__name__ == cls.__name__]
if same_name:
is_interactive = self._app_name == '__default__'
same_name.append(cls)
is_dynamic_cls = all([c.__module__ == '__main__' for c in same_name])
if not (is_interactive and is_dynamic_cls):
raise RuntimeError('Cannot have multiple Component classes with '
'the same name unless using interactive session '
'and the classes are dynamically defined: %r'
% same_name)
# Mark the class and the module as used
logger.debug('Registering Component class %r' % cls.__name__)
self._register_module(cls.__jsmodule__)
def _register_module(self, mod_name):
""" Register a module with the client, as well as its
    dependencies, and associated assets of the module and its
dependencies. If the module was already defined, it is
re-defined.
"""
if (mod_name.startswith(('flexx.app', 'flexx.event')) and
'.examples' not in mod_name):
return # these are part of flexx core assets
modules = set()
assets = []
def collect_module_and_deps(mod):
if mod.name.startswith(('flexx.app', 'flexx.event')):
return # these are part of flexx core assets
if mod.name not in self._present_modules:
self._present_modules.add(mod.name)
for dep in mod.deps:
if dep.startswith(('flexx.app', 'flexx.event')):
continue
submod = self._store.modules[dep]
collect_module_and_deps(submod)
modules.add(mod)
# Collect module and dependent modules that are not yet defined
self._store.update_modules() # Ensure up-to-date module definition
mod = self._store.modules[mod_name]
collect_module_and_deps(mod)
f = lambda m: (m.name.startswith('__main__'), m.name)
modules = solve_dependencies(sorted(modules, key=f))
# Collect associated assets
for mod in modules:
for asset_name in self._store.get_associated_assets(mod.name):
if asset_name not in self._present_assets:
self._present_assets.add(asset_name)
assets.append(self._store.get_asset(asset_name))
# If the module was already defined and thus needs to be re-defined,
        # we only redefine *this* module, no deps and no associated assets.
if not modules:
modules.append(mod)
# Collect CSS and JS assets
for mod in modules:
if mod.get_css().strip():
assets.append(self._store.get_asset(mod.name + '.css'))
for mod in modules:
assets.append(self._store.get_asset(mod.name + '.js'))
# Mark classes as used
for mod in modules:
for cls in mod.component_classes:
self._present_classes.add(cls)
# Push assets over the websocket. Note how this works fine with the
# notebook because we turn ws commands into display(HTML()).
# JS can be defined via eval() or by adding a <script> to the DOM.
# The latter allows assets that do not use strict mode, but sourceURL
# does not work on FF. So we only want to eval our own assets.
for asset in assets:
if asset.name in self._assets_to_ignore:
continue
logger.debug('Loading asset %s' % asset.name)
# Determine command suffix. All our sources come in bundles,
# for which we use eval because it makes sourceURL work on FF.
# (It does not work in Chrome in either way.)
suffix = asset.name.split('.')[-1].upper()
if suffix == 'JS' and isinstance(asset, Bundle):
suffix = 'JS-EVAL'
self.send_command('DEFINE', suffix, asset.name, asset.to_string())
## Communication with the client
def send_command(self, *command):
""" Send a command to the other side. Commands consists of at least one
argument (a string representing the type of command).
"""
assert len(command) >= 1
if self._closing:
pass
elif self.status == self.STATUS.CONNECTED:
self._ws.write_command(command)
elif self.status == self.STATUS.PENDING:
self._pending_commands.append(command)
else:
#raise RuntimeError('Cannot send commands; app is closed')
logger.warning('Cannot send commands; app is closed')
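    # Illustrative usage: the 'EXEC' command form below is the same one this
    # module uses in set_cookie(); commands are queued while the session is
    # still PENDING and flushed once the websocket connects.
    #
    #   session.send_command('EXEC', 'console.log("hello from Python");')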
def _receive_command(self, command):
""" Received a command from JS.
"""
cmd = command[0]
if cmd == 'EVALRESULT':
self._eval_result[command[2]] = command[1]
elif cmd == 'PRINT':
print('JS:', command[1])
elif cmd == 'INFO':
logger.info('JS: ' + command[1])
elif cmd == 'WARN':
logger.warning('JS: ' + command[1])
elif cmd == 'ERROR':
logger.error('JS: ' + command[1] +
' - stack trace in browser console (hit F12).')
elif cmd == 'INVOKE':
id, name, args = command[1:]
ob = self.get_component_instance(id)
if ob is None:
if id not in self._dead_component_ids:
t = 'Cannot invoke %s.%s; session does not know it (anymore).'
logger.warning(t % (id, name))
elif ob._disposed:
                pass  # JS probably sent something before knowing the object was dead
else:
func = getattr(ob, name, None)
if func:
func(*args)
elif cmd == 'PONG':
self._receive_pong(command[1])
elif cmd == 'INSTANTIATE':
modulename, cname, id, args, kwargs = command[1:]
# Maybe we still have the instance?
c = self.get_component_instance(id)
if c and not c._disposed:
self.keep_alive(c)
return
# Try to find the class
m, cls, e = None, None, 0
if modulename in assetstore.modules:
m = sys.modules[modulename]
cls = getattr(m, cname, None)
if cls is None:
e = 1
elif not (isinstance(cls, type) and issubclass(cls, JsComponent)):
cls, e = None, 2
elif cls not in AppComponentMeta.CLASSES:
cls, e = None, 3
if cls is None:
raise RuntimeError('Cannot INSTANTIATE %s.%s (%i)' %
(modulename, cname, e))
# Instantiate
kwargs['flx_session'] = self
kwargs['flx_id'] = id
assert len(args) == 0
c = cls(**kwargs) # calls keep_alive via _register_component()
        elif cmd == 'DISPOSE':  # Gets sent from local to proxy
id = command[1]
c = self.get_component_instance(id)
if c and not c._disposed: # no need to warn if component does not exist
c._dispose()
self.send_command('DISPOSE_ACK', command[1])
self._component_instances.pop(id, None) # Drop local ref now
        elif cmd == 'DISPOSE_ACK':  # Gets sent from proxy to local
self._component_instances.pop(command[1], None)
self._dead_component_ids.discard(command[1])
else:
logger.error('Unknown command received from JS:\n%s' % command)
def keep_alive(self, ob, iters=1):
""" Keep an object alive for a certain amount of time, expressed
in Python-JS ping roundtrips. This is intended for making JsComponent
        (i.e. proxy components) survive the time between instantiation
        triggered from JS and their attachment to a property, though any type
of object can be given.
"""
ping_to_schedule_at = self._ping_counter + iters
el = self._get_ping_call_list(ping_to_schedule_at)
el[1][id(ob)] = ob # add to dict of objects to keep alive
def call_after_roundtrip(self, callback, *args):
""" A variant of ``call_soon()`` that calls a callback after
        a py-js roundtrip. This can be convenient to delay an action until
after other things have settled down.
"""
# The ping_counter represents the ping count that is underway.
# Since we want at least a full ping, we want one count further.
ping_to_schedule_at = self._ping_counter + 1
el = self._get_ping_call_list(ping_to_schedule_at)
el.append((callback, args))
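    # Illustrative usage (the callback shown is an assumption): run an action
    # only after the client has processed everything sent so far.
    #
    #   session.call_after_roundtrip(logger.info, 'client caught up')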
async def co_roundtrip(self):
""" Coroutine to wait for one Py-JS-Py roundtrip.
"""
count = 0
def up():
nonlocal count
count += 1
self.call_after_roundtrip(up)
while count < 1:
await asyncio.sleep(0.02)
async def co_eval(self, js):
""" Coroutine to evaluate JS in the client, wait for the result,
        and then return it. It is recommended to use this method only
for testing purposes.
"""
id = self._eval_count
self._eval_count += 1
self.send_command('EVALANDRETURN', js, id)
while id not in self._eval_result:
await asyncio.sleep(0.2)
return self._eval_result.pop(id)
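    # Illustrative usage from within a coroutine (testing only, per the
    # docstring above):
    #
    #   result = await session.co_eval('1 + 1')   # -> 2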
def _get_ping_call_list(self, ping_count):
""" Get an element from _ping_call for the specified ping_count.
The element is a list [ping_count, {objects}, *(callback, args)]
"""
# No pending ping_calls?
if len(self._ping_calls) == 0:
# Start pinging
send_ping_later(self)
# Append element
el = [ping_count, {}]
self._ping_calls.append(el)
return el
# Try to find existing element, or insert it
for i in reversed(range(len(self._ping_calls))):
el = self._ping_calls[i]
if el[0] == ping_count:
return el
elif el[0] < ping_count:
el = [ping_count, {}]
self._ping_calls.insert(i + 1, el)
return el
else:
el = [ping_count, {}]
self._ping_calls.insert(0, el)
return el
def _receive_pong(self, count):
# Process ping calls
while len(self._ping_calls) > 0 and self._ping_calls[0][0] <= count:
_, objects, *callbacks = self._ping_calls.pop(0)
objects.clear()
del objects
for callback, args in callbacks:
asyncio.get_event_loop().call_soon(callback, *args)
# Continue pinging?
if len(self._ping_calls) > 0:
send_ping_later(self)
def send_ping_later(session):
    # Pass a weakref so that a reference lingering in the asyncio loop does not
    # prevent the session from being discarded.
def x(weaksession):
s = weaksession()
if s is not None and s.status > 0:
s._ping_counter += 1
s.send_command('PING', s._ping_counter)
# asyncio.get_event_loop().call_soon(x, weakref.ref(session))
asyncio.get_event_loop().call_later(0.01, x, weakref.ref(session))
## Functions to get page
# These could be methods, but are only for internal use
def get_page(session):
""" Get the string for the HTML page to render this session's app.
Not a lot; all other JS and CSS assets are pushed over the websocket.
"""
css_assets = [assetstore.get_asset('reset.css')]
js_assets = [assetstore.get_asset('flexx-core.js')]
return _get_page(session, js_assets, css_assets, 3, False)
def get_page_for_export(session, commands, link=0):
""" Get the string for an exported HTML page (to run without a server).
In this case, there is no websocket to push JS/CSS assets over; these
need to be included inside or alongside the main html page.
"""
# This function basically collects all assets that the session needs,
# creates a special -export.js asset that executes the given commands,
# and puts it all together using _get_page().
# We start as a normal page ...
css_assets = [assetstore.get_asset('reset.css')]
js_assets = [assetstore.get_asset('flexx-core.js')]
# Get all the used modules
modules = [assetstore.modules[name] for name in session.present_modules]
f = lambda m: (m.name.startswith('__main__'), m.name)
modules = solve_dependencies(sorted(modules, key=f))
# First the associated assets
asset_names = set()
for mod in modules:
for asset_name in assetstore.get_associated_assets(mod.name):
if asset_name not in asset_names:
asset_names.add(asset_name)
asset = assetstore.get_asset(asset_name)
if asset.name.lower().endswith('.js'):
js_assets.append(asset)
else:
css_assets.append(asset)
# Then the modules themselves
for mod in modules:
if mod.get_css().strip():
css_assets.append(assetstore.get_asset(mod.name + '.css'))
for mod in modules:
js_assets.append(assetstore.get_asset(mod.name + '.js'))
# Create asset for launching the app (commands that normally get sent
# over the websocket)
lines = []
lines.append('flexx.is_exported = true;\n')
lines.append('flexx.run_exported_app = function () {')
lines.append(' var commands_b64 = [')
for command in commands:
if command[0] != 'DEFINE':
command_str = base64.encodebytes(serializer.encode(command)).decode()
lines.append(' "' + command_str.replace('\n', '') + '",')
lines.append(' ];')
lines.append(' bb64 = flexx.require("bb64");')
lines.append(' for (var i=0; i<commands_b64.length; i++) {')
lines.append(' var command = flexx.serializer.decode('
'bb64.decode(commands_b64[i]));')
lines.append(' flexx.s1._receive_command(command);')
lines.append(' }\n};\n')
# Create a session asset for it, "-export.js" is always embedded
export_asset = Asset('flexx-export.js', '\n'.join(lines))
js_assets.append(export_asset)
# Combine it all
return _get_page(session, js_assets, css_assets, link, True)
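# Encoding sketch (illustration only, not part of the original module).
# get_page_for_export() embeds each serialized command as a base64 string
# with newlines stripped, exactly as in the loop above; ``payload`` stands
# in for the bytes produced by ``serializer.encode(command)``.
def _example_encode_command_for_export(payload):
    import base64
    return base64.encodebytes(payload).decode().replace('\n', '')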
def _get_page(session, js_assets, css_assets, link, export):
""" Compose index page. Depending on the value of link and the types
of assets, the assets are either embedded or linked.
"""
pre_path = 'flexx/assets' if export else '/flexx/assets' # relative / abs
codes = []
for assets in [css_assets, js_assets]:
for asset in assets:
if link in (0, 1):
html = asset.to_html('{}', link)
else:
if asset.name.endswith(('-info.js', '-export.js')):
# Special case, is always embedded, see get_page_for_export()
html = asset.to_html('', 0)
else:
html = asset.to_html(pre_path + '/shared/{}', link)
codes.append(html)
if export and assets is js_assets:
codes.append('<script>window.flexx.spin();</script>')
codes.append('') # whitespace between css and js assets
codes.append('<script>flexx.create_session("%s", "%s");</script>\n' %
(session.app_name, session.id))
headers = session.app.headers if hasattr(session.app, 'headers') else ''
src = INDEX.replace('HEADER-HOOK', headers)
if link in (0, 1):
asset_names = [a.name for a in css_assets + js_assets]
toc = '<!-- Contents:\n\n- ' + '\n- '.join(asset_names) + '\n\n-->'
codes.insert(0, toc)
src = src.replace('ASSET-HOOK', '\n\n\n'.join(codes))
else:
src = src.replace('ASSET-HOOK', '\n'.join(codes))
return src
| |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class SplinePlugTest( GafferTest.TestCase ) :
def testSplineDefinition( self ) :
d = Gaffer.SplineDefinitionff( ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)), Gaffer.SplineDefinitionInterpolation.Linear )
self.assertEqual( d.points(), ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)) )
self.assertEqual( d.interpolation, Gaffer.SplineDefinitionInterpolation.Linear )
self.assertTrue( d.trimEndPoints() )
self.assertEqual( d.points(), ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)) )
d = Gaffer.SplineDefinitionff( ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)), Gaffer.SplineDefinitionInterpolation.CatmullRom )
self.assertEqual( d.points(), ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)) )
self.assertEqual( d.interpolation, Gaffer.SplineDefinitionInterpolation.CatmullRom )
self.assertTrue( d.trimEndPoints() )
self.assertEqual( d.points(), ((0, 0), (0,0), (1,1), (1,1)) )
d = Gaffer.SplineDefinitionff( ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)), Gaffer.SplineDefinitionInterpolation.BSpline )
self.assertEqual( d.points(), ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)) )
self.assertEqual( d.interpolation, Gaffer.SplineDefinitionInterpolation.BSpline )
self.assertTrue( d.trimEndPoints() )
self.assertEqual( d.points(), ((0, 0), (1,1)) )
d = Gaffer.SplineDefinitionff( ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)), Gaffer.SplineDefinitionInterpolation.MonotoneCubic )
self.assertEqual( d.points(), ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)) )
self.assertEqual( d.interpolation, Gaffer.SplineDefinitionInterpolation.MonotoneCubic )
self.assertTrue( d.trimEndPoints() )
self.assertEqual( d.points(), ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1)) )
d = Gaffer.SplineDefinitionff( ((0, 0), (0,0), (1,1), (1,1)), Gaffer.SplineDefinitionInterpolation.BSpline )
self.assertFalse( d.trimEndPoints() ) # Not enough CVs for BSpline
d = Gaffer.SplineDefinitionff( ((0, 0), (0,0), (0,0), (1,1), (1,1), (1,1.1)), Gaffer.SplineDefinitionInterpolation.BSpline )
self.assertFalse( d.trimEndPoints() ) # Endpoints don't match
def testConstructor( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s )
self.assertEqual( p.getValue(), s )
s2 = Gaffer.SplineDefinitionff(
(
( 1, 1 ),
( 1, 1 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 0, 0 ),
( 0, 0 ),
),
Gaffer.SplineDefinitionInterpolation.Linear
)
p.setValue( s2 )
self.assertEqual( p.getValue(), s2 )
def testSerialisation( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( p.getValue(), s )
sn = Gaffer.ScriptNode()
sn["n"] = Gaffer.Node()
sn["n"]["p"] = p
se = sn.serialise()
sn = Gaffer.ScriptNode()
sn.execute( se )
self.assertEqual( sn["n"]["p"].getValue(), s )
self.assertEqual( len( sn["n"]["p"].pointPlug( 0 ) ), 2 )
self.assertEqual( sn["n"]["p"].pointPlug( 0 ).keys(), [ "x", "y" ] )
def testSerialisationWithNonDefaultValue( self ) :
defaultSpline = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
sn = Gaffer.ScriptNode()
sn["n"] = Gaffer.Node()
sn["n"]["p"] = Gaffer.SplineffPlug( "a", defaultValue=defaultSpline, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( sn["n"]["p"].getValue(), defaultSpline )
valueSpline = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
sn["n"]["p"].setValue( valueSpline )
self.assertEqual( sn["n"]["p"].getValue(), valueSpline )
se = sn.serialise()
sn = Gaffer.ScriptNode()
sn.execute( se )
self.assertEqual( sn["n"]["p"].getValue(), valueSpline )
def testPointAccess( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s, flags=Gaffer.Plug.Flags.Dynamic )
self.assertEqual( p.numPoints(), 4 )
for i in range( p.numPoints() ) :
self.assertTrue( p.pointXPlug( i ).isInstanceOf( Gaffer.FloatPlug.staticTypeId() ) )
self.assertTrue( p.pointYPlug( i ).isInstanceOf( Gaffer.FloatPlug.staticTypeId() ) )
self.assertTrue( p.pointXPlug( i ).parent().isSame( p.pointPlug( i ) ) )
self.assertTrue( p.pointYPlug( i ).parent().isSame( p.pointPlug( i ) ) )
# accessing nonexistent points should raise exceptions
self.assertRaises( Exception, p.pointPlug, 4 )
self.assertRaises( Exception, p.pointXPlug, 4 )
self.assertRaises( Exception, p.pointYPlug, 4 )
def testPointDeletion( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s, flags=Gaffer.Plug.Flags.Dynamic )
self.assertEqual( p.numPoints(), 4 )
for i in range( p.numPoints() ) :
self.assertIsNotNone( p.pointPlug( i ) )
self.assertIsNotNone( p.pointXPlug( i ) )
self.assertIsNotNone( p.pointYPlug( i ) )
p.removePoint( 0 )
self.assertEqual( p.numPoints(), 3 )
for i in range( p.numPoints() ) :
self.assertIsNotNone( p.pointPlug( i ) )
self.assertIsNotNone( p.pointXPlug( i ) )
self.assertIsNotNone( p.pointYPlug( i ) )
p.removeChild( p.pointPlug( 0 ) )
self.assertEqual( p.numPoints(), 2 )
for i in range( p.numPoints() ) :
self.assertIsNotNone( p.pointPlug( i ) )
self.assertIsNotNone( p.pointXPlug( i ) )
self.assertIsNotNone( p.pointYPlug( i ) )
def testPointTampering( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s, flags=Gaffer.Plug.Flags.Dynamic )
del p.pointPlug( 0 )["x"]
del p.pointPlug( 0 )["y"]
self.assertRaises( Exception, p.pointXPlug, 0 )
self.assertRaises( Exception, p.pointYPlug, 0 )
def testPlugSetSignal( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s, flags=Gaffer.Plug.Flags.Dynamic )
n = Gaffer.Node()
n["p"] = p
self.__plugSetCount = 0
def plugSet( plug ) :
if plug.isSame( p ) :
self.__plugSetCount += 1
c = n.plugSetSignal().connect( plugSet )
p.pointYPlug( 2 ).setValue( 1.0 )
self.assertEqual( self.__plugSetCount, 1 )
pointIndex = p.addPoint()
self.assertEqual( self.__plugSetCount, 2 )
p.removePoint( pointIndex )
self.assertEqual( self.__plugSetCount, 3 )
def testDefaultValue( self ) :
s1 = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
s2 = Gaffer.SplineDefinitionff(
(
( 1, 1 ),
( 0, 0 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s1, flags=Gaffer.Plug.Flags.Dynamic )
self.assertEqual( p.defaultValue(), s1 )
self.assertEqual( p.getValue(), s1 )
self.assertTrue( p.isSetToDefault() )
p.setValue( s2 )
self.assertEqual( p.defaultValue(), s1 )
self.assertEqual( p.getValue(), s2 )
self.assertFalse( p.isSetToDefault() )
p.setToDefault()
self.assertEqual( p.defaultValue(), s1 )
self.assertEqual( p.getValue(), s1 )
self.assertTrue( p.isSetToDefault() )
p.setValue( s2 )
p.resetDefault()
self.assertEqual( p.defaultValue(), s2 )
self.assertEqual( p.getValue(), s2 )
self.assertTrue( p.isSetToDefault() )
def testPlugFlags( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=s )
self.assertEqual( p.pointXPlug( 0 ).getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( p.pointYPlug( 0 ).getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
def testConnection( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p1 = Gaffer.SplineffPlug( defaultValue=s )
p2 = Gaffer.SplineffPlug( defaultValue=s )
p1.setInput( p2 )
self.assertTrue( p1.getInput().isSame( p2 ) )
self.assertTrue( p1["interpolation"].getInput().isSame( p2["interpolation"] ) )
for i in range( 0, 4 ) :
self.assertTrue( p1.pointPlug( i ).getInput().isSame( p2.pointPlug( i ) ) )
def testCreateCounterpart( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p1 = Gaffer.SplineffPlug( defaultValue=s )
p2 = p1.createCounterpart( "p2", Gaffer.Plug.Direction.In )
self.assertEqual( p2.getName(), "p2" )
self.assertTrue( isinstance( p2, Gaffer.SplineffPlug ) )
self.assertEqual( p2.numPoints(), p1.numPoints() )
self.assertEqual( p2.getValue(), p1.getValue() )
self.assertEqual( p2.defaultValue(), p1.defaultValue() )
def testPromoteToBox( self ) :
spline = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = Gaffer.SplineffPlug( defaultValue=spline )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["n"] ] ) )
p = Gaffer.PlugAlgo.promote( b["n"]["p"] )
self.assertEqual( p.defaultValue(), b["n"]["p"].defaultValue() )
self.assertEqual( p.numPoints(), b["n"]["p"].numPoints() )
self.assertEqual( p.getValue().interpolation, b["n"]["p"].getValue().interpolation )
self.assertEqual( len( p.getValue().points() ), len( b["n"]["p"].getValue().points() ) )
self.assertEqual( p.getValue(), b["n"]["p"].getValue() )
self.assertTrue( b["n"]["p"].getInput().isSame( p ) )
def testSerialisationWithMorePointsThanDefault( self ) :
s1 = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
s2 = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = Gaffer.SplineffPlug( defaultValue=s1, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( s["n"]["p"].getValue(), s1 )
s["n"]["p"].setValue( s2 )
self.assertEqual( s["n"]["p"].getValue(), s2 )
se = s.serialise()
s = Gaffer.ScriptNode()
s.execute( se )
self.assertEqual( s["n"]["p"].getValue(), s2 )
def testSerialisationWithLessPointsThanDefault( self ) :
s1 = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
s2 = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = Gaffer.SplineffPlug( defaultValue=s1, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( s["n"]["p"].getValue(), s1 )
s["n"]["p"].setValue( s2 )
self.assertEqual( s["n"]["p"].getValue(), s2 )
se = s.serialise()
s = Gaffer.ScriptNode()
s.execute( se )
self.assertEqual( s["n"]["p"].getValue(), s2 )
def testDefaultConstructor( self ) :
p = Gaffer.SplineffPlug()
p.getValue()
def testTruncatedDefaultValue( self ) :
defaultValue = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.5, 0.5 ),
( 0.5, 0.5 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
# This tricky value could fool a naive implementation
# of isSetToDefault().
truncatedDefaultValue = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.5, 0.5 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
p = Gaffer.SplineffPlug( "a", defaultValue=defaultValue, flags=Gaffer.Plug.Flags.Dynamic )
p.setValue( truncatedDefaultValue )
self.assertEqual( p.defaultValue(), defaultValue )
self.assertEqual( p.getValue(), truncatedDefaultValue )
self.assertFalse( p.isSetToDefault() )
def testConnectionSerialisation( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
script["n"]["user"]["p1"] = Gaffer.SplineffPlug( defaultValue=s, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"]["p2"] = Gaffer.SplineffPlug( defaultValue=s, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"]["p2"].setInput( script["n"]["user"]["p1"] )
def assertConnection( script ) :
self.assertTrue( script["n"]["user"]["p2"].getInput().isSame( script["n"]["user"]["p1"] ) )
self.assertTrue( script["n"]["user"]["p2"]["interpolation"].getInput().isSame( script["n"]["user"]["p1"]["interpolation"] ) )
for i in range( 0, 4 ) :
self.assertTrue( script["n"]["user"]["p2"].pointPlug( i ).getInput().isSame( script["n"]["user"]["p1"].pointPlug( i ) ) )
assertConnection( script )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
assertConnection( script2 )
def testPartialConnectionSerialisation( self ) :
s = Gaffer.SplineDefinitionff(
(
( 0, 0 ),
( 0.2, 0.3 ),
( 0.4, 0.9 ),
( 1, 1 ),
),
Gaffer.SplineDefinitionInterpolation.CatmullRom
)
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
script["n"]["user"]["s"] = Gaffer.SplineffPlug( defaultValue=s, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"]["x"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"]["y"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["n"]["user"]["s"].pointXPlug( 0 ).setInput( script["n"]["user"]["x"] )
script["n"]["user"]["s"].pointYPlug( 2 ).setInput( script["n"]["user"]["y"] )
def assertConnection( script ) :
self.assertTrue( script["n"]["user"]["s"].getInput() is None )
self.assertTrue( script["n"]["user"]["s"]["interpolation"].getInput() is None )
for i in range( 0, 4 ) :
if i == 0 :
self.assertTrue( script["n"]["user"]["s"].pointXPlug( i ).getInput().isSame( script["n"]["user"]["x"] ) )
else :
self.assertTrue( script["n"]["user"]["s"].pointXPlug( i ).getInput() is None )
if i == 2 :
self.assertTrue( script["n"]["user"]["s"].pointYPlug( i ).getInput().isSame( script["n"]["user"]["y"] ) )
else :
self.assertTrue( script["n"]["user"]["s"].pointYPlug( i ).getInput() is None )
assertConnection( script )
script2 = Gaffer.ScriptNode()
script2.execute( script.serialise() )
assertConnection( script2 )
if __name__ == "__main__":
unittest.main()
| |
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
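# Usage sketch (illustration only): scale_face() is handy right before
# plotting faces that were mean/variance normalized. Assumes matplotlib
# is installed; the helper is defined here for illustration and is never
# called at import time.
def _example_plot_face(face):
    import matplotlib.pyplot as plt
    plt.imshow(scale_face(face), cmap=plt.cm.gray)
    plt.axis('off')
    plt.show()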
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
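# Shape sketch (illustration only): with the default slice used by the
# public loaders, (slice(70, 195), slice(78, 172)), and resize=0.5, the
# computation above yields 62 x 47 pixel images.
def _example_output_shape(slice_=(slice(70, 195), slice(78, 172)), resize=0.5):
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
    if resize is not None:
        h, w = int(resize * h), int(resize * w)
    return h, w  # (62, 47) for the defaults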
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
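# Usage sketch (illustration only; defined but never called): load the
# development training pairs and inspect the shapes documented above.
def _example_fetch_pairs():
    lfw_pairs_train = fetch_lfw_pairs(subset='train')
    print(lfw_pairs_train.pairs.shape)   # (2200, 2, 62, 47) with defaults
    print(lfw_pairs_train.target.shape)  # (2200,)
    print(lfw_pairs_train.target_names)  # ['Different persons' 'Same person']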
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('app', '0016_auto_20151019_1506'),
]
operations = [
migrations.AddField(
model_name='events',
name='airport',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-airport"></span><small><small> aeropuerto</small></small>'),
),
migrations.AddField(
model_name='events',
name='amusement_park',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-amusement-park"></span><small><small> parque de diversiones</small></small>'),
),
migrations.AddField(
model_name='events',
name='aquarium',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-aquarium"></span><small><small> acuario</small></small>'),
),
migrations.AddField(
model_name='events',
name='art_gallery',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-art-gallery"></span><small><small> galleria de arte</small></small>'),
),
migrations.AddField(
model_name='events',
name='beauty_salon',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-beauty-salon"></span><small><small> salon de bellesa</small></small>'),
),
migrations.AddField(
model_name='events',
name='bicycle_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-bicycle-store"></span><small><small> tienda de bicicletas</small></small>'),
),
migrations.AddField(
model_name='events',
name='bus_station',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-bus-station"></span><small><small> estaci\xc3\xb3n de camiones</small></small>'),
),
migrations.AddField(
model_name='events',
name='campground',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-campground"></span><small><small> campamento</small></small>'),
),
migrations.AddField(
model_name='events',
name='car_dealer',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-car-dealer"></span><small><small> agencia de coches</small></small>'),
),
migrations.AddField(
model_name='events',
name='car_rental',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-car-rental"></span><small><small> renta de coches</small></small>'),
),
migrations.AddField(
model_name='events',
name='car_repair',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-car-repair"></span><small><small> reparaci\xc3\xb3n de coches</small></small>'),
),
migrations.AddField(
model_name='events',
name='car_wash',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-car-wash"></span><small><small> auto lavado</small></small>'),
),
migrations.AddField(
model_name='events',
name='casino',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-casino"></span><small><small> casino</small></small>'),
),
migrations.AddField(
model_name='events',
name='church',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-church"></span><small><small> iglesia</small></small>'),
),
migrations.AddField(
model_name='events',
name='city_hall',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-city-hall"></span><small><small> palacio municipal</small></small>'),
),
migrations.AddField(
model_name='events',
name='clothing_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-clothing-store"></span><small><small> tienda de ropa</small></small>'),
),
migrations.AddField(
model_name='events',
name='convenience_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-convenience-store"></span><small><small> tienda de conveniencia</small></small>'),
),
migrations.AddField(
model_name='events',
name='courthouse',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-courthouse"></span><small><small> corte</small></small>'),
),
migrations.AddField(
model_name='events',
name='dentist',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-dentist"></span><small><small> dentista</small></small>'),
),
migrations.AddField(
model_name='events',
name='department_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-department-store"></span><small><small> tienda departamental</small></small>'),
),
migrations.AddField(
model_name='events',
name='doctor',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-doctor"></span><small><small> doctor</small></small>'),
),
migrations.AddField(
model_name='events',
name='electronics_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-electronics-store"></span><small><small> tienda de electr\xc3\xb3nica</small></small>'),
),
migrations.AddField(
model_name='events',
name='embassy',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-embassy"></span><small><small> embajada</small></small>'),
),
migrations.AddField(
model_name='events',
name='food',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-food"></span><small><small> comida</small></small>'),
),
migrations.AddField(
model_name='events',
name='grocery_or_supermarket',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-grocery-or-supermarket"></span><small><small> supermercado</small></small>'),
),
migrations.AddField(
model_name='events',
name='gym',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-gym"></span><small><small> gimnasio </small></small>'),
),
migrations.AddField(
model_name='events',
name='health',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-health"></span><small><small> centro de salud</small></small>'),
),
migrations.AddField(
model_name='events',
name='laundry',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-laundry"></span><small><small> lavander\xc3\xada</small></small>'),
),
migrations.AddField(
model_name='events',
name='lawyer',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-lawyer"></span><small><small> abogado</small></small>'),
),
migrations.AddField(
model_name='events',
name='liquor_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-liquor-store"></span><small><small> liquorer\xc3\xada </small></small>'),
),
migrations.AddField(
model_name='events',
name='locksmith',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-locksmith"></span><small><small> cerrajero</small></small>'),
),
migrations.AddField(
model_name='events',
name='meal_delivery',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-meal-delivery"></span><small><small> comida a domicilio</small></small>'),
),
migrations.AddField(
model_name='events',
name='meal_takeaway',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-meal-takeaway"></span><small><small> comida para llevar</small></small>'),
),
migrations.AddField(
model_name='events',
name='museum',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-museum"></span><small><small> museo</small></small>'),
),
migrations.AddField(
model_name='events',
name='night_club',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-night-club"></span><small><small> centro nocturno</small></small>'),
),
migrations.AddField(
model_name='events',
name='park',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-park"></span><small><small> parque</small></small>'),
),
migrations.AddField(
model_name='events',
name='parking',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-parking"></span><small><small> estacionamiento</small></small>'),
),
migrations.AddField(
model_name='events',
name='pet_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-pet-store"></span><small><small> tienda de animales</small></small>'),
),
migrations.AddField(
model_name='events',
name='pharmacy',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-pharmacy"></span><small><small> farmacia</small></small>'),
),
migrations.AddField(
model_name='events',
name='physiotherapist',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-physiotherapist"></span><small><small> terapeuta f\xc3\xadsico</small></small>'),
),
migrations.AddField(
model_name='events',
name='post_office',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-post-office"></span><small><small> oficina de correos</small></small>'),
),
migrations.AddField(
model_name='events',
name='rv_park',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-rv-park"></span><small><small> centro de auto-caravanas</small></small>'),
),
migrations.AddField(
model_name='events',
name='shoe_store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-shoe-store"></span><small><small> tienda de zapatos</small></small>'),
),
migrations.AddField(
model_name='events',
name='shopping_mall',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-shopping-mall"></span><small><small> centro comercial</small></small>'),
),
migrations.AddField(
model_name='events',
name='spa',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-spa"></span><small><small> spa</small></small>'),
),
migrations.AddField(
model_name='events',
name='stadium',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-stadium"></span><small><small> estadio</small></small>'),
),
migrations.AddField(
model_name='events',
name='store',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-store"></span><small><small> tienda</small></small>'),
),
migrations.AddField(
model_name='events',
name='subway_station',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-subway-station"></span><small><small> estaci\xc3\xb3n de metro</small></small>'),
),
migrations.AddField(
model_name='events',
name='taxi_stand',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-taxi-stand"></span><small><small> taxis</small></small>'),
),
migrations.AddField(
model_name='events',
name='train_station',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-train-station"></span><small><small> estaci\xc3\xb3n de tren</small></small>'),
),
migrations.AddField(
model_name='events',
name='travel_agency',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-travel-agency"></span><small><small> agencia de viajes</small></small>'),
),
migrations.AddField(
model_name='events',
name='university',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-university"></span><small><small> universidad</small></small>'),
),
migrations.AddField(
model_name='events',
name='veterinary_care',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-veterinary-care"></span><small><small> cuidado veterinario</small></small>'),
),
migrations.AddField(
model_name='events',
name='zoo',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-zoo"></span><small><small> zool\xc3\xb3gico</small></small>'),
),
migrations.AlterField(
model_name='events',
name='atm',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-atm"></span><small><small> cajero</small></small>'),
),
migrations.AlterField(
model_name='events',
name='bank',
field=models.BooleanField(default=False, verbose_name=b'<span class="map-icon map-icon-bank"></span><small><small> banco</small></small>'),
),
migrations.AlterField(
model_name='events',
name='bar',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-bar"></span><small><small> bar</small></small>'),
),
migrations.AlterField(
model_name='events',
name='cafe',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-cafe"></span><small><small> cafe</small></small>'),
),
migrations.AlterField(
model_name='events',
name='gas_station',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-gas-station"></span><small><small> gasoliner\xc3\xada</small></small>'),
),
migrations.AlterField(
model_name='events',
name='hospital',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-hospital"></span><small><small> hospital</small></small>'),
),
migrations.AlterField(
model_name='events',
name='lodging',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-lodging"></span><small><small> hospedaje</small></small>'),
),
migrations.AlterField(
model_name='events',
name='police',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-police"></span><small><small> policia</small></small>'),
),
migrations.AlterField(
model_name='events',
name='restaurant',
field=models.BooleanField(default=True, verbose_name=b'<span class="map-icon map-icon-restaurant"></span><small><small> restaurante</small></small>'),
),
migrations.AlterField(
model_name='events',
name='start_time',
field=models.TimeField(default=datetime.time(16, 43, 15, 17163), help_text=b'Indica la hora de inicio del evento', verbose_name=b'Hora de inicio'),
),
migrations.AlterField(
model_name='my_groups',
name='color',
field=models.CharField(default=b'2A1C2E', max_length=25),
),
]
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/TestScript) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class TestScript(domainresource.DomainResource):
""" Describes a set of tests.
TestScript is a resource that specifies a suite of tests against a FHIR
server implementation to determine compliance against the FHIR
specification.
"""
resource_name = "TestScript"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Contact details of the publisher.
List of `TestScriptContact` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date for this version of the TestScript.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the TestScript.
Type `str`. """
self.experimental = None
""" If for testing purposes, not real usage.
Type `bool`. """
self.fixture = None
""" Fixture in the test script - by reference (uri).
List of `TestScriptFixture` items (represented as `dict` in JSON). """
self.identifier = None
""" External identifier.
Type `Identifier` (represented as `dict` in JSON). """
self.metadata = None
""" Required capability that is assumed to function correctly on the
FHIR server being tested.
Type `TestScriptMetadata` (represented as `dict` in JSON). """
self.multiserver = None
""" Whether or not the tests apply to more than one FHIR server.
Type `bool`. """
self.name = None
""" Informal name for this TestScript.
Type `str`. """
self.profile = None
""" Reference of the validation profile.
List of `FHIRReference` items referencing `Resource` (represented as `dict` in JSON). """
self.publisher = None
""" Name of the publisher (Organization or individual).
Type `str`. """
self.requirements = None
""" Scope and Usage this Test Script is for.
Type `str`. """
self.setup = None
""" A series of required setup operations before tests are executed.
Type `TestScriptSetup` (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired.
Type `str`. """
self.teardown = None
""" A series of required clean up steps.
Type `TestScriptTeardown` (represented as `dict` in JSON). """
self.test = None
""" A test in this script.
List of `TestScriptTest` items (represented as `dict` in JSON). """
self.url = None
""" Absolute URL used to reference this TestScript.
Type `str`. """
self.useContext = None
""" Content intends to support these contexts.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.variable = None
""" Placeholder for evaluated elements.
List of `TestScriptVariable` items (represented as `dict` in JSON). """
self.version = None
""" Logical id for this version of the TestScript.
Type `str`. """
super(TestScript, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScript, self).elementProperties()
js.extend([
("contact", "contact", TestScriptContact, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("experimental", "experimental", bool, False, None, False),
("fixture", "fixture", TestScriptFixture, True, None, False),
("identifier", "identifier", identifier.Identifier, False, None, False),
("metadata", "metadata", TestScriptMetadata, False, None, False),
("multiserver", "multiserver", bool, False, None, False),
("name", "name", str, False, None, True),
("profile", "profile", fhirreference.FHIRReference, True, None, False),
("publisher", "publisher", str, False, None, False),
("requirements", "requirements", str, False, None, False),
("setup", "setup", TestScriptSetup, False, None, False),
("status", "status", str, False, None, True),
("teardown", "teardown", TestScriptTeardown, False, None, False),
("test", "test", TestScriptTest, True, None, False),
("url", "url", str, False, None, True),
("useContext", "useContext", codeableconcept.CodeableConcept, True, None, False),
("variable", "variable", TestScriptVariable, True, None, False),
("version", "version", str, False, None, False),
])
return js
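# Usage sketch (illustration only; ``as_json()`` is assumed to be provided
# by the generated FHIR base classes and is not shown in this excerpt).
# It builds a minimal TestScript from a JSON dictionary containing the
# required ``name``, ``status`` and ``url`` elements and round-trips it.
def _example_minimal_testscript():
    script = TestScript({
        'resourceType': 'TestScript',
        'name': 'example',
        'status': 'draft',
        'url': 'http://example.org/fhir/TestScript/example',
    })
    return script.as_json()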
from . import backboneelement
class TestScriptContact(backboneelement.BackboneElement):
""" Contact details of the publisher.
Contacts to assist a user in finding and communicating with the publisher.
"""
resource_name = "TestScriptContact"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of a individual to contact.
Type `str`. """
self.telecom = None
""" Contact details for individual or publisher.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(TestScriptContact, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptContact, self).elementProperties()
js.extend([
("name", "name", str, False, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
])
return js
class TestScriptFixture(backboneelement.BackboneElement):
""" Fixture in the test script - by reference (uri).
Fixture in the test script - by reference (uri). All fixtures are required
for the test script to execute.
"""
resource_name = "TestScriptFixture"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.autocreate = None
""" Whether or not to implicitly create the fixture during setup.
Type `bool`. """
self.autodelete = None
""" Whether or not to implicitly delete the fixture during teardown.
Type `bool`. """
self.resource = None
""" Reference of the resource.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(TestScriptFixture, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptFixture, self).elementProperties()
js.extend([
("autocreate", "autocreate", bool, False, None, False),
("autodelete", "autodelete", bool, False, None, False),
("resource", "resource", fhirreference.FHIRReference, False, None, False),
])
return js
class TestScriptMetadata(backboneelement.BackboneElement):
""" Required capability that is assumed to function correctly on the FHIR
server being tested.
The required capability must exist and is assumed to function correctly on
the FHIR server being tested.
"""
resource_name = "TestScriptMetadata"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.capability = None
""" Capabilities that are assumed to function correctly on the FHIR
server being tested.
List of `TestScriptMetadataCapability` items (represented as `dict` in JSON). """
self.link = None
""" Links to the FHIR specification.
List of `TestScriptMetadataLink` items (represented as `dict` in JSON). """
super(TestScriptMetadata, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptMetadata, self).elementProperties()
js.extend([
("capability", "capability", TestScriptMetadataCapability, True, None, True),
("link", "link", TestScriptMetadataLink, True, None, False),
])
return js
class TestScriptMetadataCapability(backboneelement.BackboneElement):
""" Capabilities that are assumed to function correctly on the FHIR server
being tested.
Capabilities that must exist and are assumed to function correctly on the
FHIR server being tested.
"""
resource_name = "TestScriptMetadataCapability"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.conformance = None
""" Required Conformance.
Type `FHIRReference` referencing `Conformance` (represented as `dict` in JSON). """
self.description = None
""" The expected capabilities of the server.
Type `str`. """
self.destination = None
""" Which server these requirements apply to.
Type `int`. """
self.link = None
""" Links to the FHIR specification.
List of `str` items. """
self.required = None
""" Are the capabilities required?.
Type `bool`. """
self.validated = None
""" Are the capabilities validated?.
Type `bool`. """
super(TestScriptMetadataCapability, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptMetadataCapability, self).elementProperties()
js.extend([
("conformance", "conformance", fhirreference.FHIRReference, False, None, True),
("description", "description", str, False, None, False),
("destination", "destination", int, False, None, False),
("link", "link", str, True, None, False),
("required", "required", bool, False, None, False),
("validated", "validated", bool, False, None, False),
])
return js
class TestScriptMetadataLink(backboneelement.BackboneElement):
""" Links to the FHIR specification.
A link to the FHIR specification that this test is covering.
"""
resource_name = "TestScriptMetadataLink"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Short description.
Type `str`. """
self.url = None
""" URL to the specification.
Type `str`. """
super(TestScriptMetadataLink, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptMetadataLink, self).elementProperties()
js.extend([
("description", "description", str, False, None, False),
("url", "url", str, False, None, True),
])
return js
class TestScriptSetup(backboneelement.BackboneElement):
""" A series of required setup operations before tests are executed.
"""
resource_name = "TestScriptSetup"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" A setup operation or assert to perform.
List of `TestScriptSetupAction` items (represented as `dict` in JSON). """
self.metadata = None
""" Capabilities that are assumed to function correctly on the FHIR
server being tested.
Type `TestScriptMetadata` (represented as `dict` in JSON). """
super(TestScriptSetup, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetup, self).elementProperties()
js.extend([
("action", "action", TestScriptSetupAction, True, None, True),
("metadata", "metadata", TestScriptMetadata, False, None, False),
])
return js
class TestScriptSetupAction(backboneelement.BackboneElement):
""" A setup operation or assert to perform.
Action would contain either an operation or an assertion.
"""
resource_name = "TestScriptSetupAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.assert_fhir = None
""" The assertion to perform.
Type `TestScriptSetupActionAssert` (represented as `dict` in JSON). """
self.operation = None
""" The setup operation to perform.
Type `TestScriptSetupActionOperation` (represented as `dict` in JSON). """
super(TestScriptSetupAction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupAction, self).elementProperties()
js.extend([
("assert_fhir", "assert", TestScriptSetupActionAssert, False, None, False),
("operation", "operation", TestScriptSetupActionOperation, False, None, False),
])
return js
class TestScriptSetupActionAssert(backboneelement.BackboneElement):
""" The assertion to perform.
Evaluates the results of previous operations to determine if the server
under test behaves appropriately.
"""
resource_name = "TestScriptSetupActionAssert"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.compareToSourceId = None
""" Id of fixture used to compare the "sourceId/path" evaluations to.
Type `str`. """
self.compareToSourcePath = None
""" XPath or JSONPath expression against fixture used to compare the
"sourceId/path" evaluations to.
Type `str`. """
self.contentType = None
""" xml | json.
Type `str`. """
self.description = None
""" Tracking/reporting assertion description.
Type `str`. """
self.direction = None
""" response | request.
Type `str`. """
self.headerField = None
""" HTTP header field name.
Type `str`. """
self.label = None
""" Tracking/logging assertion label.
Type `str`. """
self.minimumId = None
""" Fixture Id of minimum content resource.
Type `str`. """
self.navigationLinks = None
""" Perform validation on navigation links?.
Type `bool`. """
self.operator = None
""" equals | notEquals | in | notIn | greaterThan | lessThan | empty |
notEmpty | contains | notContains.
Type `str`. """
self.path = None
""" XPath or JSONPath expression.
Type `str`. """
self.resource = None
""" Resource type.
Type `str`. """
self.response = None
""" okay | created | noContent | notModified | bad | forbidden |
notFound | methodNotAllowed | conflict | gone | preconditionFailed
| unprocessable.
Type `str`. """
self.responseCode = None
""" HTTP response code to test.
Type `str`. """
self.sourceId = None
""" Fixture Id of source expression or headerField.
Type `str`. """
self.validateProfileId = None
""" Profile Id of validation profile reference.
Type `str`. """
self.value = None
""" The value to compare to.
Type `str`. """
self.warningOnly = None
""" Will this assert produce a warning only on error?.
Type `bool`. """
super(TestScriptSetupActionAssert, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionAssert, self).elementProperties()
js.extend([
("compareToSourceId", "compareToSourceId", str, False, None, False),
("compareToSourcePath", "compareToSourcePath", str, False, None, False),
("contentType", "contentType", str, False, None, False),
("description", "description", str, False, None, False),
("direction", "direction", str, False, None, False),
("headerField", "headerField", str, False, None, False),
("label", "label", str, False, None, False),
("minimumId", "minimumId", str, False, None, False),
("navigationLinks", "navigationLinks", bool, False, None, False),
("operator", "operator", str, False, None, False),
("path", "path", str, False, None, False),
("resource", "resource", str, False, None, False),
("response", "response", str, False, None, False),
("responseCode", "responseCode", str, False, None, False),
("sourceId", "sourceId", str, False, None, False),
("validateProfileId", "validateProfileId", str, False, None, False),
("value", "value", str, False, None, False),
("warningOnly", "warningOnly", bool, False, None, False),
])
return js
class TestScriptSetupActionOperation(backboneelement.BackboneElement):
""" The setup operation to perform.
The operation to perform.
"""
resource_name = "TestScriptSetupActionOperation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.accept = None
""" xml | json.
Type `str`. """
self.contentType = None
""" xml | json.
Type `str`. """
self.description = None
""" Tracking/reporting operation description.
Type `str`. """
self.destination = None
""" Which server to perform the operation on.
Type `int`. """
self.encodeRequestUrl = None
""" Whether or not to send the request url in encoded format.
Type `bool`. """
self.label = None
""" Tracking/logging operation label.
Type `str`. """
self.params = None
""" Explicitly defined path parameters.
Type `str`. """
self.requestHeader = None
""" Each operation can have one ore more header elements.
List of `TestScriptSetupActionOperationRequestHeader` items (represented as `dict` in JSON). """
self.resource = None
""" Resource type.
Type `str`. """
self.responseId = None
""" Fixture Id of mapped response.
Type `str`. """
self.sourceId = None
""" Fixture Id of body for PUT and POST requests.
Type `str`. """
self.targetId = None
""" Id of fixture used for extracting the [id], [type], and [vid] for
GET requests.
Type `str`. """
self.type = None
""" The setup operation type that will be executed.
Type `Coding` (represented as `dict` in JSON). """
self.url = None
""" Request URL.
Type `str`. """
super(TestScriptSetupActionOperation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionOperation, self).elementProperties()
js.extend([
("accept", "accept", str, False, None, False),
("contentType", "contentType", str, False, None, False),
("description", "description", str, False, None, False),
("destination", "destination", int, False, None, False),
("encodeRequestUrl", "encodeRequestUrl", bool, False, None, False),
("label", "label", str, False, None, False),
("params", "params", str, False, None, False),
("requestHeader", "requestHeader", TestScriptSetupActionOperationRequestHeader, True, None, False),
("resource", "resource", str, False, None, False),
("responseId", "responseId", str, False, None, False),
("sourceId", "sourceId", str, False, None, False),
("targetId", "targetId", str, False, None, False),
("type", "type", coding.Coding, False, None, False),
("url", "url", str, False, None, False),
])
return js
class TestScriptSetupActionOperationRequestHeader(backboneelement.BackboneElement):
""" Each operation can have one ore more header elements.
Header elements would be used to set HTTP headers.
"""
resource_name = "TestScriptSetupActionOperationRequestHeader"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.field = None
""" HTTP header field name.
Type `str`. """
self.value = None
""" HTTP headerfield value.
Type `str`. """
super(TestScriptSetupActionOperationRequestHeader, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptSetupActionOperationRequestHeader, self).elementProperties()
js.extend([
("field", "field", str, False, None, True),
("value", "value", str, False, None, True),
])
return js
class TestScriptTeardown(backboneelement.BackboneElement):
""" A series of required clean up steps.
A series of operations required to clean up after all the tests are
executed (successfully or otherwise).
"""
resource_name = "TestScriptTeardown"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" One or more teardown operations to perform.
List of `TestScriptTeardownAction` items (represented as `dict` in JSON). """
super(TestScriptTeardown, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTeardown, self).elementProperties()
js.extend([
("action", "action", TestScriptTeardownAction, True, None, True),
])
return js
class TestScriptTeardownAction(backboneelement.BackboneElement):
""" One or more teardown operations to perform.
The teardown action will only contain an operation.
"""
resource_name = "TestScriptTeardownAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.operation = None
""" The teardown operation to perform.
Type `TestScriptSetupActionOperation` (represented as `dict` in JSON). """
super(TestScriptTeardownAction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTeardownAction, self).elementProperties()
js.extend([
("operation", "operation", TestScriptSetupActionOperation, False, None, False),
])
return js
class TestScriptTest(backboneelement.BackboneElement):
""" A test in this script.
"""
resource_name = "TestScriptTest"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.action = None
""" A test operation or assert to perform.
List of `TestScriptTestAction` items (represented as `dict` in JSON). """
self.description = None
""" Tracking/reporting short description of the test.
Type `str`. """
self.metadata = None
""" Capabilities that are expected to function correctly on the FHIR
server being tested.
Type `TestScriptMetadata` (represented as `dict` in JSON). """
self.name = None
""" Tracking/logging name of this test.
Type `str`. """
super(TestScriptTest, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTest, self).elementProperties()
js.extend([
("action", "action", TestScriptTestAction, True, None, True),
("description", "description", str, False, None, False),
("metadata", "metadata", TestScriptMetadata, False, None, False),
("name", "name", str, False, None, False),
])
return js
class TestScriptTestAction(backboneelement.BackboneElement):
""" A test operation or assert to perform.
Action would contain either an operation or an assertion.
"""
resource_name = "TestScriptTestAction"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.assert_fhir = None
""" The setup assertion to perform.
Type `TestScriptSetupActionAssert` (represented as `dict` in JSON). """
self.operation = None
""" The setup operation to perform.
Type `TestScriptSetupActionOperation` (represented as `dict` in JSON). """
super(TestScriptTestAction, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptTestAction, self).elementProperties()
js.extend([
("assert_fhir", "assert", TestScriptSetupActionAssert, False, None, False),
("operation", "operation", TestScriptSetupActionOperation, False, None, False),
])
return js
class TestScriptVariable(backboneelement.BackboneElement):
""" Placeholder for evaluated elements.
Variable is set based either on element value in response body or on header
field value in the response headers.
"""
resource_name = "TestScriptVariable"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.headerField = None
""" HTTP header field name for source.
Type `str`. """
self.name = None
""" Descriptive name for this variable.
Type `str`. """
self.path = None
""" XPath or JSONPath against the fixture body.
Type `str`. """
self.sourceId = None
""" Fixture Id of source expression or headerField within this variable.
Type `str`. """
super(TestScriptVariable, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(TestScriptVariable, self).elementProperties()
js.extend([
("headerField", "headerField", str, False, None, False),
("name", "name", str, False, None, True),
("path", "path", str, False, None, False),
("sourceId", "sourceId", str, False, None, False),
])
return js
from . import codeableconcept
from . import coding
from . import contactpoint
from . import fhirdate
from . import fhirreference
from . import identifier
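# A minimal usage sketch (assuming this module is importable as part of a
# fhirclient-style package; the JSON payload below is illustrative only):
#
#     from fhirclient.models import testscript
#
#     data = {
#         "resourceType": "TestScript",
#         "name": "example",
#         "status": "draft",
#         "url": "http://example.org/TestScript/example",
#     }
#     script = testscript.TestScript(data)   # raises FHIRValidationError on invalid input
#     print(script.name, script.status)
#     print(script.as_json())                # serialize back to a JSON dictionary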
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ActionGroupsOperations(object):
"""ActionGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
action_group_name, # type: str
action_group, # type: "_models.ActionGroupResource"
**kwargs # type: Any
):
# type: (...) -> "_models.ActionGroupResource"
"""Create a new action group or update an existing one.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group: The action group to create or use for the update.
:type action_group: ~$(python-base-namespace).v2019_06_01.models.ActionGroupResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_06_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(action_group, 'ActionGroupResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
action_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ActionGroupResource"
"""Get an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_06_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
action_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete an action group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
action_group_name, # type: str
action_group_patch, # type: "_models.ActionGroupPatchBody"
**kwargs # type: Any
):
# type: (...) -> "_models.ActionGroupResource"
"""Updates an existing action group's tags. To update other fields use the CreateOrUpdate method.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param action_group_patch: Parameters supplied to the operation.
:type action_group_patch: ~$(python-base-namespace).v2019_06_01.models.ActionGroupPatchBody
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ActionGroupResource, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_06_01.models.ActionGroupResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(action_group_patch, 'ActionGroupPatchBody')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ActionGroupResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}'} # type: ignore
def list_by_subscription_id(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ActionGroupList"]
"""Get a list of all action groups in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActionGroupList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2019_06_01.models.ActionGroupList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription_id.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ActionGroupList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ActionGroupList"]
"""Get a list of all action groups in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ActionGroupList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~$(python-base-namespace).v2019_06_01.models.ActionGroupList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ActionGroupList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ActionGroupList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups'} # type: ignore
def enable_receiver(
self,
resource_group_name, # type: str
action_group_name, # type: str
enable_request, # type: "_models.EnableRequest"
**kwargs # type: Any
):
# type: (...) -> None
"""Enable a receiver in an action group. This changes the receiver's status from Disabled to
Enabled. This operation is only supported for Email or SMS receivers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param action_group_name: The name of the action group.
:type action_group_name: str
:param enable_request: The receiver to re-enable.
:type enable_request: ~$(python-base-namespace).v2019_06_01.models.EnableRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.enable_receiver.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'actionGroupName': self._serialize.url("action_group_name", action_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(enable_request, 'EnableRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 409]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
enable_receiver.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe'} # type: ignore
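# A minimal usage sketch (hypothetical wiring; the concrete client class and credential
# depend on the containing SDK package, e.g. azure-mgmt-monitor with azure-identity):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.monitor import MonitorManagementClient
#
#     client = MonitorManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     group = client.action_groups.get("my-resource-group", "my-action-group")
#     for ag in client.action_groups.list_by_resource_group("my-resource-group"):
#         print(ag.name)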
| |
"""
Command line interface logic.
The application classes in this module are installed as executables via
setuptools entry points.
"""
# import udiskie.depend first - for side effects!
from .depend import has_Notify, has_Gtk, _in_X
import inspect
import logging.config
import traceback
import asyncio
import gbulb
from docopt import docopt, DocoptExit
import udiskie
import udiskie.config
import udiskie.mount
import udiskie.udisks2
from .common import extend, ObjDictView
from .locale import _
__all__ = [
'Daemon',
'Mount',
'Umount',
]
class Choice:
"""Mapping of command line arguments to option values."""
def __init__(self, mapping):
"""Set mapping between arguments and values."""
self._mapping = mapping
def _check(self, args):
"""Exit in case of multiple exclusive arguments."""
if sum(bool(args[arg]) for arg in self._mapping) > 1:
raise DocoptExit(_('These options are mutually exclusive: {0}',
', '.join(self._mapping)))
def __call__(self, args):
"""Get the option value from the parsed arguments."""
self._check(args)
for arg, val in self._mapping.items():
if args[arg] not in (None, False):
return val
def Switch(name):
"""Negatable option."""
return Choice({'--' + name: True,
'--no-' + name: False})
class Value:
"""Option which is given as value of a command line argument."""
def __init__(self, name):
"""Set argument name."""
self._name = name
def __call__(self, args):
"""Get the value of the command line argument."""
return args[self._name]
class OptionalValue:
def __init__(self, name):
"""Set argument name."""
self._name = name
self._choice = Switch(name.lstrip('-'))
def __call__(self, args):
"""Get the value of the command line argument."""
return self._choice(args) and args[self._name]
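# Example of how these helpers translate parsed docopt arguments into option values
# (a sketch; the args dict below is hand-written rather than produced by docopt):
#
#     args = {'--automount': True, '--no-automount': False,
#             '--menu': 'flat', '--config': None, '--no-config': False}
#     Switch('automount')(args)        # -> True
#     Value('--menu')(args)            # -> 'flat'
#     OptionalValue('--config')(args)  # -> None here; False with --no-config,
#                                      #    or the file name with --config=FILE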
class SelectLevel(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno == self.level
class _EntryPoint:
"""
Abstract base class for program entry points.
Implementations need to
- implement :meth:`_init`
- provide a docstring
- extend :cvar:`option_defaults` and :cvar:`option_rules`.
"""
option_defaults = {
'log_level': logging.INFO,
}
option_rules = {
'log_level': Choice({
'--verbose': logging.DEBUG,
'--quiet': logging.ERROR}),
}
usage_remarks = _("""
Note that the options in the individual groups are mutually exclusive.
The config file can be a JSON or preferably a YAML file. For an
example, see the MAN page (or doc/udiskie.8.txt in the repository).
""")
def __init__(self, argv=None):
"""Parse command line options, read config and initialize members."""
gbulb.install(gtk=_in_X and has_Gtk(3))
# parse program options (retrieve log level and config file name):
args = docopt(self.usage, version='udiskie ' + self.version)
default_opts = self.option_defaults
program_opts = self.program_options(args)
# initialize logging configuration:
log_level = program_opts.get('log_level', default_opts['log_level'])
debug = log_level <= logging.DEBUG
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'plain': {'format': _('%(message)s')},
'detail': {'format': _('%(levelname)s [%(asctime)s] %(name)s: %(message)s')},
},
'filters': {
'info': {'()': 'udiskie.cli.SelectLevel', 'level': logging.INFO},
},
'handlers': {
'info': {'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'formatter': 'plain',
'filters': ['info']},
'error': {'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
'formatter': 'plain',
'level': 'WARNING'},
'debug': {'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
'formatter': 'detail'},
},
# configure root logger:
'root': {
'handlers': ['info', 'debug' if debug else 'error'],
'level': log_level,
},
})
# parse config options
config_file = OptionalValue('--config')(args)
config = udiskie.config.Config.from_file(config_file)
options = {}
options.update(default_opts)
options.update(config.program_options)
options.update(program_opts)
# initialize instance variables
self.config = config
self.options = options
def program_options(self, args):
"""Get program options from docopt parsed options."""
options = {}
for name, rule in self.option_rules.items():
val = rule(args)
if val is not None:
options[name] = val
return options
@classmethod
def main(cls, argv=None):
"""Run program. Returns program exit code."""
return cls(argv).run()
@property
def version(self):
"""Version from setuptools metadata."""
return udiskie.__version__
@property
def usage(self):
"""Full usage string."""
return inspect.cleandoc(self.__doc__ + self.usage_remarks)
def _init(self):
"""Return the application main task as Future."""
raise NotImplementedError()
def run(self):
"""Run the main loop. Returns exit code."""
self.mainloop = asyncio.get_event_loop()
try:
return self.mainloop.run_until_complete(
self._start_async_tasks())
except KeyboardInterrupt:
return 1
async def _start_async_tasks(self):
"""Start asynchronous operations."""
try:
self.udisks = await udiskie.udisks2.Daemon.create()
results = await self._init()
return 0 if all(results) else 1
except Exception:
traceback.print_exc()
return 1
class Component:
def __init__(self, create):
self.create = create
self.instance = None
@property
def active(self):
return self.instance is not None and self.instance.active
def activate(self):
if self.instance is None:
self.instance = self.create()
if not self.instance.active:
self.instance.activate()
def deactivate(self):
if self.active:
self.instance.deactivate()
def toggle(self):
if self.active:
self.deactivate()
else:
self.activate()
class Daemon(_EntryPoint):
"""
udiskie: a user-level daemon for auto-mounting.
Usage:
udiskie [options]
udiskie (--help | --version)
General options:
-c FILE, --config=FILE Set config file
-C, --no-config Don't use config file
-v, --verbose Increase verbosity (DEBUG)
-q, --quiet Decrease verbosity
-h, --help Show this help
-V, --version Show version information
Daemon options:
-a, --automount Automount new devices
-A, --no-automount Disable automounting
-n, --notify Show popup notifications
-N, --no-notify Disable notifications
-t, --tray Show tray icon
-s, --smart-tray Auto hide tray icon
-T, --no-tray Disable tray icon
-m MENU, --menu MENU Tray menu [flat/nested]
--appindicator Use appindicator for status icon
--no-appindicator Don't use appindicator
--password-cache MINUTES Set password cache timeout
--no-password-cache Disable password cache
-p COMMAND, --password-prompt COMMAND Command for password retrieval
-P, --no-password-prompt Disable unlocking
--notify-command COMMAND Command to execute on events
--no-notify-command Disable command notifications
Deprecated options:
-f PROGRAM, --file-manager PROGRAM Set program for browsing
-F, --no-file-manager Disable browsing
"""
option_defaults = extend(_EntryPoint.option_defaults, {
'automount': True,
'notify': True,
'tray': False,
'menu': 'flat',
'appindicator': False,
'file_manager': 'xdg-open',
'password_prompt': 'builtin:gui',
'password_cache': False,
'notify_command': None,
})
option_rules = extend(_EntryPoint.option_rules, {
'automount': Switch('automount'),
'notify': Switch('notify'),
'tray': Choice({
'--tray': True,
'--no-tray': False,
'--smart-tray': 'auto'}),
'menu': Value('--menu'),
'appindicator': Switch('appindicator'),
'file_manager': OptionalValue('--file-manager'),
'password_prompt': OptionalValue('--password-prompt'),
'password_cache': OptionalValue('--password-cache'),
'notify_command': OptionalValue('--notify-command'),
})
def _init(self):
import udiskie.prompt
config = self.config
options = self.options
# prepare mounter object
prompt = udiskie.prompt.password(options['password_prompt'])
browser = udiskie.prompt.browser(options['file_manager'])
cache = None
if options['password_cache'] is not False:
import udiskie.cache
timeout = int(options['password_cache']) * 60
cache = udiskie.cache.PasswordCache(timeout)
self.mounter = udiskie.mount.Mounter(
config=config.device_config,
prompt=prompt,
browser=browser,
cache=cache,
udisks=self.udisks)
# check component availability
if options['notify'] and not has_Notify():
libnotify_not_available = _(
"Typelib for 'libnotify' is not available. Possible causes include:"
"\n\t- libnotify is not installed"
"\n\t- the typelib is provided by a separate package"
"\n\t- libnotify was built with introspection disabled"
"\n\nStarting udiskie without notifications.")
logging.getLogger(__name__).error(libnotify_not_available)
options['notify'] = False
if options['tray'] and not _in_X:
no_X_session = _(
"Not run within X session. "
"\nStarting udiskie without tray icon.\n")
logging.getLogger(__name__).error(no_X_session)
options['tray'] = False
if options['tray'] and not has_Gtk(3):
gtk3_not_available = _(
"Typelib for 'Gtk 3.0' is not available. Possible causes include:"
"\n\t- GTK3 is not installed"
"\n\t- the typelib is provided by a separate package"
"\n\t- GTK3 was built with introspection disabled"
"\nStarting udiskie without tray icon.\n")
logging.getLogger(__name__).error(gtk3_not_available)
options['tray'] = False
# start components
tasks = []
self.notify = Component(self._load_notify)
self.statusicon = Component(self._load_statusicon)
self.automounter = Component(self._load_automounter)
if options['notify']:
self.notify.activate()
if options['notify_command']:
# is currently enabled/disabled statically only once:
self.notify_command()
if options['tray']:
self.statusicon.activate()
tasks.append(self.statusicon.instance._icon.task)
else:
tasks.append(asyncio.Future())
if options['automount']:
self.automounter.activate()
tasks.append(self.mounter.add_all())
return asyncio.gather(*tasks)
def _load_notify(self):
import udiskie.notify
from gi.repository import Notify
Notify.init('udiskie')
aconfig = self.config.notification_actions
if self.options['automount']:
aconfig.setdefault('device_added', [])
else:
aconfig.setdefault('device_added', ['mount'])
return udiskie.notify.Notify(
Notify.Notification.new,
mounter=self.mounter,
timeout=self.config.notifications,
aconfig=aconfig)
def notify_command(self):
import udiskie.prompt
return udiskie.prompt.notify_command(
self.options['notify_command'], self.mounter)
def _load_statusicon(self):
import udiskie.tray
options = self.options
if options['tray'] == 'auto':
smart = True
elif options['tray'] is True:
smart = False
else:
raise ValueError("Invalid tray: %s" % (options['tray'],))
icons = udiskie.tray.Icons(self.config.icon_names)
actions = udiskie.mount.DeviceActions(self.mounter)
if options['menu'] == 'flat':
flat = True
# dropped legacy 'nested' mode:
elif options['menu'] in ('smart', 'nested'):
flat = False
else:
raise ValueError("Invalid menu: %s" % (options['menu'],))
menu_maker = udiskie.tray.UdiskieMenu(self, icons, actions, flat)
if options['appindicator']:
import udiskie.appindicator
TrayIcon = udiskie.appindicator.AppIndicatorIcon
else:
TrayIcon = udiskie.tray.TrayIcon
trayicon = TrayIcon(menu_maker, icons)
return udiskie.tray.UdiskieStatusIcon(trayicon, menu_maker, smart)
def _load_automounter(self):
import udiskie.automount
return udiskie.automount.AutoMounter(self.mounter)
class Mount(_EntryPoint):
"""
udiskie-mount: a user-level command line utility for mounting.
Usage:
udiskie-mount [options] (-a | DEVICE...)
udiskie-mount (--help | --version)
General options:
-c FILE, --config=FILE Set config file
-C, --no-config Don't use config file
-v, --verbose Increase verbosity (DEBUG)
-q, --quiet Decrease verbosity
-h, --help Show this help
-V, --version Show version information
Mount options:
-a, --all Mount all handleable devices
-r, --recursive Recursively mount partitions
-R, --no-recursive Disable recursive mounting
-o OPTIONS, --options OPTIONS Mount option list
-p COMMAND, --password-prompt COMMAND Command for password retrieval
-P, --no-password-prompt Disable unlocking
"""
option_defaults = extend(_EntryPoint.option_defaults, {
'recursive': None,
'options': None,
'<device>': None,
'password_prompt': 'builtin:tty',
})
option_rules = extend(_EntryPoint.option_rules, {
'recursive': Switch('recursive'),
'options': Value('--options'),
'<device>': Value('DEVICE'),
'password_prompt': OptionalValue('--password-prompt'),
})
def _init(self):
import udiskie.prompt
config = self.config
options = self.options
device_config = config.device_config
if options['options']:
device_config._filters.insert(0, udiskie.config.MountOptions({
'options': [o.strip() for o in options['options'].split(',')],
}))
prompt = udiskie.prompt.password(options['password_prompt'])
mounter = udiskie.mount.Mounter(
config=config.device_config,
prompt=prompt,
udisks=self.udisks)
recursive = options['recursive']
if options['<device>']:
tasks = [mounter.add(path, recursive=recursive)
for path in options['<device>']]
else:
tasks = [mounter.add_all(recursive=recursive)]
return asyncio.gather(*tasks)
class Umount(_EntryPoint):
"""
udiskie-umount: a user-level command line utility for unmounting.
Usage:
udiskie-umount [options] (-a | DEVICE...)
udiskie-umount (--help | --version)
General options:
-c FILE, --config=FILE Set config file
-C, --no-config Don't use config file
-v, --verbose Increase verbosity (DEBUG)
-q, --quiet Decrease verbosity
-h, --help Show this help
-V, --version Show version information
Unmount options:
-a, --all Unmount all handleable devices
-d, --detach Power off drive if possible
-D, --no-detach Don't power off drive
-e, --eject Eject media from device if possible
-E, --no-eject Don't eject media
-f, --force Force removal (recursive unmounting)
-F, --no-force Don't force removal
-l, --lock Lock device after unmounting
-L, --no-lock Don't lock device
"""
option_defaults = extend(_EntryPoint.option_defaults, {
'detach': None,
'eject': False,
'force': False,
'lock': True,
'<device>': None,
})
option_rules = extend(_EntryPoint.option_rules, {
'detach': Switch('detach'),
'eject': Switch('eject'),
'force': Switch('force'),
'lock': Switch('lock'),
'<device>': Value('DEVICE'),
})
def _init(self):
config = self.config
options = self.options
mounter = udiskie.mount.Mounter(
self.udisks,
config=config.device_config)
strategy = dict(detach=options['detach'],
eject=options['eject'],
lock=options['lock'])
if options['<device>']:
strategy['force'] = options['force']
tasks = [mounter.remove(path, **strategy)
for path in options['<device>']]
else:
tasks = [mounter.remove_all(**strategy)]
return asyncio.gather(*tasks)
def _parse_filter(spec):
try:
key, val = spec.split('=', 1)
except ValueError:
if spec.startswith('!'):
val = False
key = spec[1:]
else:
val = True
key = spec
return key, val
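# _parse_filter turns "key=value", "key" and "!key" specs into (key, value) pairs,
# e.g. (attribute names here are purely illustrative):
#
#     _parse_filter('vendor=Intenso')   # -> ('vendor', 'Intenso')
#     _parse_filter('is_luks')          # -> ('is_luks', True)
#     _parse_filter('!is_mounted')      # -> ('is_mounted', False)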
class Info(_EntryPoint):
"""
udiskie-info: get information about handleable devices.
Usage:
udiskie-info [options] [-o OUTPUT] [-f FILTER]... (-a | DEVICE...)
udiskie-info (--help | --version)
General options:
-c FILE, --config=FILE Set config file
-C, --no-config Don't use config file
-v, --verbose Increase verbosity (DEBUG)
-q, --quiet Decrease verbosity
-h, --help Show this help
-V, --version Show version information
Unmount options:
-a, --all List all handleable devices
-o COL, --output COL Specify output columns in a format string
containing the allowed device attributes,
e.g.: "{ui_label} {is_luks}"
[default: device_presentation].
-f FILT, --filter FILT Print only devices that match the given
filter.
"""
option_defaults = extend(_EntryPoint.option_defaults, {
'output': '',
'filter': '',
'<device>': None,
})
option_rules = extend(_EntryPoint.option_rules, {
'output': Value('--output'),
'filter': Value('--filter'),
'<device>': Value('DEVICE'),
})
def _init(self):
config = self.config
options = self.options
mounter = udiskie.mount.Mounter(
self.udisks,
config=config.device_config)
if options['<device>']:
devices = [self.udisks.find(path) for path in options['<device>']]
else:
devices = mounter.get_all_handleable()
DeviceFilter = udiskie.config.DeviceFilter
output = options['output']
# old behaviour: single attribute
if output in DeviceFilter.VALID_PARAMETERS:
def format_output(device):
return getattr(device, output)
# new behaviour: format string
else:
from string import Formatter
formatter = Formatter()
def format_output(device):
view = ObjDictView(device, DeviceFilter.VALID_PARAMETERS)
return formatter.vformat(output, (), view)
filters = [_parse_filter(spec) for spec in options['filter']]
matcher = DeviceFilter(dict(filters))
for device in devices:
if matcher.match(device):
print(format_output(device))
return asyncio.gather()
| |
"""
Name Server and helper functions.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import with_statement
import warnings
import re
import logging
import socket
import sys
from Pyro4.threadutil import RLock, Thread
from Pyro4.errors import NamingError, PyroError, ProtocolError
from Pyro4 import core, socketutil
import Pyro4.constants
__all__ = ["locateNS", "resolve", "startNS", "startNSloop", "MemoryStorage"]
if sys.version_info >= (3, 0):
basestring = str
log = logging.getLogger("Pyro4.naming")
class MemoryStorage(dict):
"""
Storage implementation that is just an in-memory dict.
Stopping the nameserver will make the server instantly forget about everything.
"""
def __init__(self, **kwargs):
super(MemoryStorage, self).__init__(**kwargs)
def optimized_prefix_list(self, prefix):
return None
def optimized_regex_list(self, regex):
return None
def everything(self):
return self.copy()
def remove_items(self, items):
for item in items:
try:
del self[item]
except KeyError:
pass
def close(self):
pass
class NameServer(object):
"""
Pyro name server. Provides a simple flat name space to map logical object names to Pyro URIs.
Default storage is done in an in-memory dictionary. You can provide custom storage types.
"""
def __init__(self, storageProvider=None):
self.storage = storageProvider
if storageProvider is None:
self.storage = MemoryStorage()
log.debug("using volatile in-memory dict storage")
self.lock = RLock()
def count(self):
return len(self.storage)
def lookup(self, name):
"""Lookup the given name, returns an URI if found"""
try:
return core.URI(self.storage[name])
except KeyError:
raise NamingError("unknown name: " + name)
def register(self, name, uri, safe=False):
"""Register a name with an URI. If safe is true, name cannot be registered twice.
The uri can be a string or an URI object."""
if isinstance(uri, core.URI):
uri = uri.asString()
elif not isinstance(uri, basestring):
raise TypeError("only URIs or strings can be registered")
else:
core.URI(uri) # check if uri is valid
if not isinstance(name, basestring):
raise TypeError("name must be a str")
with self.lock:
if safe and name in self.storage:
raise NamingError("name already registered: " + name)
self.storage[name] = uri
def remove(self, name=None, prefix=None, regex=None):
"""Remove a registration. returns the number of items removed."""
if name and name in self.storage and name != Pyro4.constants.NAMESERVER_NAME:
with self.lock:
del self.storage[name]
return 1
if prefix:
items = list(self.list(prefix=prefix).keys())
if Pyro4.constants.NAMESERVER_NAME in items:
items.remove(Pyro4.constants.NAMESERVER_NAME)
self.storage.remove_items(items)
return len(items)
if regex:
items = list(self.list(regex=regex).keys())
if Pyro4.constants.NAMESERVER_NAME in items:
items.remove(Pyro4.constants.NAMESERVER_NAME)
self.storage.remove_items(items)
return len(items)
return 0
# noinspection PyNoneFunctionAssignment
def list(self, prefix=None, regex=None):
"""Retrieve the registered items as a dictionary name-to-URI. The URIs
in the resulting dict are strings, not URI objects.
You can filter by prefix or by regex."""
with self.lock:
if prefix:
result = self.storage.optimized_prefix_list(prefix)
if result is not None:
return result
result = {}
for name in self.storage:
if name.startswith(prefix):
result[name] = self.storage[name]
return result
elif regex:
result = self.storage.optimized_regex_list(regex)
if result is not None:
return result
result = {}
try:
regex = re.compile(regex + "$") # add end of string marker
except re.error:
x = sys.exc_info()[1]
raise NamingError("invalid regex: " + str(x))
else:
for name in self.storage:
if regex.match(name):
result[name] = self.storage[name]
return result
else:
# just return (a copy of) everything
return self.storage.everything()
def ping(self):
"""A simple test method to check if the name server is running correctly."""
pass
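# Hedged usage sketch (hypothetical helper, not part of Pyro4): exercises the
# NameServer registry in-process with the default in-memory storage backend.
def _demo_nameserver_registry():
    ns = NameServer()  # defaults to MemoryStorage
    ns.register("example.object", "PYRO:obj_1234@localhost:51234")
    assert ns.lookup("example.object").host == "localhost"
    assert "example.object" in ns.list(prefix="example.")
    assert ns.remove("example.object") == 1
    assert ns.count() == 0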
class NameServerDaemon(core.Daemon):
"""Daemon that contains the Name Server."""
def __init__(self, host=None, port=None, unixsocket=None, nathost=None, natport=None, storage=None):
if host is None:
host = Pyro4.config.HOST
if port is None:
port = Pyro4.config.NS_PORT
if nathost is None:
nathost = Pyro4.config.NATHOST
if natport is None:
natport = Pyro4.config.NATPORT or None
storage = storage or "memory"
if storage == "memory":
log.debug("using volatile in-memory dict storage")
self.nameserver = NameServer(MemoryStorage())
elif storage.startswith("dbm:") and len(storage) > 4:
dbmfile = storage[4:]
log.debug("using persistent dbm storage in file %s", dbmfile)
from Pyro4.naming_storage import DbmStorage
self.nameserver = NameServer(DbmStorage(dbmfile))
elif storage.startswith("sql:") and len(storage) > 4:
sqlfile = storage[4:]
log.debug("using persistent sql storage in file %s", sqlfile)
from Pyro4.naming_storage import SqlStorage
self.nameserver = NameServer(SqlStorage(sqlfile))
else:
raise ValueError("invalid storage type '%s'" % storage)
existing_count = self.nameserver.count()
if existing_count > 0:
log.debug("number of existing entries in storage: %d", existing_count)
super(NameServerDaemon, self).__init__(host, port, unixsocket, nathost=nathost, natport=natport)
self.register(self.nameserver, Pyro4.constants.NAMESERVER_NAME)
self.nameserver.register(Pyro4.constants.NAMESERVER_NAME, self.uriFor(self.nameserver))
log.info("nameserver daemon created")
def close(self):
super(NameServerDaemon, self).close()
if self.nameserver is not None:
self.nameserver.storage.close()
self.nameserver = None
def __enter__(self):
if not self.nameserver:
raise PyroError("cannot reuse this object")
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.nameserver is not None:
self.nameserver.storage.close()
self.nameserver = None
return super(NameServerDaemon, self).__exit__(exc_type, exc_value, traceback)
def handleRequest(self, conn):
try:
return super(NameServerDaemon, self).handleRequest(conn)
except ProtocolError as x:
# Notify the user that a protocol error occurred.
# This is useful for instance when a wrong serializer is used, it helps
# a lot to immediately see what is going wrong.
warnings.warn("Pyro protocol error occurred: " + str(x))
raise
class BroadcastServer(object):
REQUEST_NSURI = "GET_NSURI" if sys.platform == "cli" else b"GET_NSURI"
def __init__(self, nsUri, bchost=None, bcport=None):
self.nsUri = nsUri
if bcport is None:
bcport = Pyro4.config.NS_BCPORT
if bchost is None:
bchost = Pyro4.config.NS_BCHOST
if ":" in nsUri.host: # ipv6
bchost = bchost or "::"
self.sock = Pyro4.socketutil.createBroadcastSocket((bchost, bcport, 0, 0), reuseaddr=Pyro4.config.SOCK_REUSE, timeout=2.0)
else:
self.sock = Pyro4.socketutil.createBroadcastSocket((bchost, bcport), reuseaddr=Pyro4.config.SOCK_REUSE, timeout=2.0)
self._sockaddr = self.sock.getsockname()
bchost = bchost or self._sockaddr[0]
bcport = bcport or self._sockaddr[1]
if ":" in bchost: # ipv6
self.locationStr = "[%s]:%d" % (bchost, bcport)
else:
self.locationStr = "%s:%d" % (bchost, bcport)
log.info("ns broadcast server created on %s", self.locationStr)
self.running = True
def close(self):
log.debug("ns broadcast server closing")
self.running = False
try:
self.sock.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error):
pass
self.sock.close()
def getPort(self):
return self.sock.getsockname()[1]
def fileno(self):
return self.sock.fileno()
def runInThread(self):
"""Run the broadcast server loop in its own thread. This is mainly for Jython,
which has problems with multiplexing it using select() with the Name server itself."""
thread = Thread(target=self.__requestLoop)
thread.setDaemon(True)
thread.start()
log.debug("broadcast server loop running in own thread")
def __requestLoop(self):
while self.running:
self.processRequest()
log.debug("broadcast server loop terminating")
def processRequest(self):
try:
data, addr = self.sock.recvfrom(100)
if data == self.REQUEST_NSURI:
responsedata = core.URI(self.nsUri)
if responsedata.host == "0.0.0.0":
# replace INADDR_ANY address by the interface IP address that connects to the requesting client
try:
interface_ip = socketutil.getInterfaceAddress(addr[0])
responsedata.host = interface_ip
except socket.error:
pass
log.debug("responding to broadcast request from %s: interface %s", addr[0], responsedata.host)
responsedata = str(responsedata).encode("iso-8859-1")
self.sock.sendto(responsedata, 0, addr)
except socket.error:
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def startNSloop(host=None, port=None, enableBroadcast=True, bchost=None, bcport=None, unixsocket=None, nathost=None, natport=None, storage=None, hmac=None):
"""utility function that starts a new Name server and enters its requestloop."""
daemon = NameServerDaemon(host, port, unixsocket, nathost=nathost, natport=natport, storage=storage)
daemon._pyroHmacKey = hmac
nsUri = daemon.uriFor(daemon.nameserver)
internalUri = daemon.uriFor(daemon.nameserver, nat=False)
bcserver = None
if unixsocket:
hostip = "Unix domain socket"
else:
hostip = daemon.sock.getsockname()[0]
if hostip.startswith("127."):
print("Not starting broadcast server for localhost.")
log.info("Not starting NS broadcast server because NS is bound to localhost")
enableBroadcast = False
if enableBroadcast:
# Make sure to pass the internal uri to the broadcast responder.
# It is almost always useless to let it return the external uri,
# because external systems won't be able to talk to this thing anyway.
bcserver = BroadcastServer(internalUri, bchost, bcport)
print("Broadcast server running on %s" % bcserver.locationStr)
bcserver.runInThread()
existing = daemon.nameserver.count()
if existing > 1: # don't count our own nameserver registration
print("Persistent store contains %d existing registrations." % existing)
print("NS running on %s (%s)" % (daemon.locationStr, hostip))
if not hmac:
print("Warning: HMAC key not set. Anyone can connect to this server!")
if daemon.natLocationStr:
print("internal URI = %s" % internalUri)
print("external URI = %s" % nsUri)
else:
print("URI = %s" % nsUri)
try:
daemon.requestLoop()
finally:
daemon.close()
if bcserver is not None:
bcserver.close()
print("NS shut down.")
def startNS(host=None, port=None, enableBroadcast=True, bchost=None, bcport=None, unixsocket=None, nathost=None, natport=None, storage=None, hmac=None):
"""utility fuction to quickly get a Name server daemon to be used in your own event loops.
Returns (nameserverUri, nameserverDaemon, broadcastServer)."""
daemon = NameServerDaemon(host, port, unixsocket, nathost=nathost, natport=natport, storage=storage)
daemon._pyroHmacKey = hmac
bcserver = None
nsUri = daemon.uriFor(daemon.nameserver)
if not unixsocket:
hostip = daemon.sock.getsockname()[0]
if hostip.startswith("127."):
# not starting broadcast server for localhost.
enableBroadcast = False
if enableBroadcast:
internalUri = daemon.uriFor(daemon.nameserver, nat=False)
bcserver = BroadcastServer(internalUri, bchost, bcport)
return nsUri, daemon, bcserver
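# Hedged usage sketch (comments only; assumes Pyro4's Daemon.sockets / Daemon.events
# API for combining event loops, as shown in the Pyro4 documentation):
#
#   nsUri, nsDaemon, bcServer = startNS(host="0.0.0.0")
#   while True:
#       rs = list(nsDaemon.sockets) + ([bcServer] if bcServer else [])
#       rs, _, _ = select.select(rs, [], [], 3)
#       if bcServer in rs:
#           bcServer.processRequest()
#       nsDaemon.events([s for s in rs if s in nsDaemon.sockets])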
def locateNS(host=None, port=None, broadcast=True, hmac_key=None):
"""Get a proxy for a name server somewhere in the network."""
if host is None:
# first try localhost if we have a good chance of finding it there
if Pyro4.config.NS_HOST in ("localhost", "::1") or Pyro4.config.NS_HOST.startswith("127."):
host = Pyro4.config.NS_HOST
if ":" in host: # ipv6
host = "[%s]" % host
uristring = "PYRO:%s@%s:%d" % (Pyro4.constants.NAMESERVER_NAME, host, port or Pyro4.config.NS_PORT)
log.debug("locating the NS: %s", uristring)
proxy = core.Proxy(uristring)
proxy._pyroHmacKey = hmac_key
try:
proxy.ping()
log.debug("located NS")
return proxy
except PyroError:
pass
if broadcast:
# broadcast lookup
if not port:
port = Pyro4.config.NS_BCPORT
log.debug("broadcast locate")
sock = Pyro4.socketutil.createBroadcastSocket(reuseaddr=Pyro4.config.SOCK_REUSE, timeout=0.7)
for _ in range(3):
try:
for bcaddr in Pyro4.config.parseAddressesString(Pyro4.config.BROADCAST_ADDRS):
try:
sock.sendto(BroadcastServer.REQUEST_NSURI, 0, (bcaddr, port))
except socket.error:
x = sys.exc_info()[1]
err = getattr(x, "errno", x.args[0])
if err not in Pyro4.socketutil.ERRNO_EADDRNOTAVAIL: # yeah, windows likes to throw these...
if err not in Pyro4.socketutil.ERRNO_EADDRINUSE: # and jython likes to throw these...
raise
data, _ = sock.recvfrom(100)
sock.close()
if sys.version_info >= (3, 0):
data = data.decode("iso-8859-1")
log.debug("located NS: %s", data)
return core.Proxy(data)
except socket.timeout:
continue
try:
sock.shutdown(socket.SHUT_RDWR)
except (OSError, socket.error):
pass
sock.close()
log.debug("broadcast locate failed, try direct connection on NS_HOST")
else:
log.debug("skipping broadcast lookup")
# broadcast failed or skipped, try PYRO directly on specific host
host = Pyro4.config.NS_HOST
port = Pyro4.config.NS_PORT
# pyro direct lookup
if not port:
port = Pyro4.config.NS_PORT
if ":" in host:
host = "[%s]" % host
if core.URI.isUnixsockLocation(host):
uristring = "PYRO:%s@%s" % (Pyro4.constants.NAMESERVER_NAME, host)
else:
uristring = "PYRO:%s@%s:%d" % (Pyro4.constants.NAMESERVER_NAME, host, port)
uri = core.URI(uristring)
log.debug("locating the NS: %s", uri)
proxy = core.Proxy(uri)
proxy._pyroHmacKey = hmac_key
try:
proxy.ping()
log.debug("located NS")
return proxy
except PyroError as x:
e = NamingError("Failed to locate the nameserver")
e.__cause__ = x
raise e
def resolve(uri, hmac_key=None):
"""Resolve a 'magic' uri (PYRONAME) into the direct PYRO uri."""
if isinstance(uri, basestring):
uri = core.URI(uri)
elif not isinstance(uri, core.URI):
raise TypeError("can only resolve Pyro URIs")
if uri.protocol == "PYRO":
return uri
log.debug("resolving %s", uri)
if uri.protocol == "PYRONAME":
nameserver = locateNS(uri.host, uri.port, hmac_key=hmac_key)
uri = nameserver.lookup(uri.object)
nameserver._pyroRelease()
return uri
else:
raise PyroError("invalid uri protocol")
def main(args=None):
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-n", "--host", dest="host", help="hostname to bind server on")
parser.add_option("-p", "--port", dest="port", type="int", help="port to bind server on (0=random)")
parser.add_option("-u", "--unixsocket", help="Unix domain socket name to bind server on")
parser.add_option("-s", "--storage", help="Storage system to use (memory, dbm:file, sql:file)", default="memory")
parser.add_option("", "--bchost", dest="bchost", help="hostname to bind broadcast server on (default is \"\")")
parser.add_option("", "--bcport", dest="bcport", type="int",
help="port to bind broadcast server on (0=random)")
parser.add_option("", "--nathost", dest="nathost", help="external hostname in case of NAT")
parser.add_option("", "--natport", dest="natport", type="int", help="external port in case of NAT")
parser.add_option("-x", "--nobc", dest="enablebc", action="store_false", default=True,
help="don't start a broadcast server")
parser.add_option("-k", "--key", help="the HMAC key to use")
options, args = parser.parse_args(args)
startNSloop(options.host, options.port, enableBroadcast=options.enablebc,
bchost=options.bchost, bcport=options.bcport, unixsocket=options.unixsocket,
nathost=options.nathost, natport=options.natport, storage=options.storage,
hmac=options.key)
if __name__ == "__main__":
main()
| |
import re
import os
import hcp
import mne
import numpy as np
from glob import glob
from copy import deepcopy
from joblib import Memory
from hcp.io.file_mapping.file_mapping import kind_map
from ..utils import check_random_state
HCP_DIR = "/storage/store/data/HCP900/"
CONVERSION_MAP = {v: k for k, v in kind_map.items()}
mem = Memory(location='.', verbose=0)
def get_all_records(hcp_path=HCP_DIR):
"""Make a dictionary with all HCP files in the directory hcp_path
Parameters
----------
hcp_path: str
Path in which the HCP files are located
Return
------
db: dict
Dictionary with {data_type: {subject: [run_index_0, run_index_1, ...]}}
The keys are str for the type of exercises and the values are
dictionaries containing a list per subject with the run_indexes.
"""
# List all config files from the unprocessed MEG recordings
pattern = os.path.join(hcp_path, "*/unprocessed/MEG/*/4D/config")
list_files = glob(pattern)
db = {}
pattern = pattern.replace("*", "(.*)")
pattern = pattern.replace("MEG/", "MEG/\d+-") # noqa
for f_name in list_files:
subject, data_type = re.match(pattern, f_name).groups()
data_type = CONVERSION_MAP[data_type]
type_subjects = db.get(data_type, {})
type_subject_records = type_subjects.get(subject, [])
type_subject_records += [len(type_subject_records)]
type_subjects[subject] = type_subject_records
db[data_type] = type_subjects
print("Found {} types".format(len(db.keys())))
return db
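# Illustrative example of the returned structure (hypothetical subject ids):
#   {'rest': {'100307': [0, 1, 2], '100408': [0, 1]},
#    'task_motor': {'100307': [0, 1]}}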
@mem.cache(ignore=['n_jobs'])
def load_one_record(data_type, subject, run_index, sfreq=300, epoch=None,
filter_params=[5., None], n_jobs=1):
# Load the record and correct the sensor space to get proper visualization
print(f"subject={subject}, data_type={data_type}, run_index={run_index}, "
f"hcp_path={HCP_DIR}")
raw = hcp.read_raw(subject, data_type=data_type, run_index=run_index,
hcp_path=HCP_DIR, verbose=0)
raw.load_data()
hcp.preprocessing.map_ch_coords_to_mne(raw)
raw.pick_types(meg='mag', eog=False, stim=True)
# filter the electrical and low frequency components
raw.notch_filter([60, 120], n_jobs=n_jobs)
raw.filter(*filter_params, n_jobs=n_jobs)
# Resample to the requested sfreq
if sfreq is not None:
raw.resample(sfreq=sfreq, n_jobs=n_jobs)
events = mne.find_events(raw)
raw.pick_types(meg='mag', stim=False)
events[:, 0] -= raw.first_samp
# Deep copy before modifying info to avoid issues when saving EvokedArray
info = deepcopy(raw.info)
info['events'] = events
info['event_id'] = np.unique(events[:, 2])
# Return the data
return raw.get_data(), info
def load_data(n_trials=10, data_type='rest', sfreq=150, epoch=None,
filter_params=[5., None], equalize="zeropad", n_jobs=1,
random_state=None):
"""Load and prepare the HCP dataset for multiCSC
Parameters
----------
n_trials : int
Number of recordings that are loaded.
data_type : str
Type of recordings loaded. Should be in {'rest', 'task_working_memory',
'task_motor', 'task_story_math', 'noise_empty_room', 'noise_subject'}.
sfreq : float
Sampling frequency of the signal. The data are resampled to match it.
epoch : tuple or None
If set to a tuple, extract epochs from the raw data, using
t_min=epoch[0] and t_max=epoch[1]. Else, use the raw signal, divided
in n_splits chunks.
filter_params : tuple
Frequency cutoffs for a band-pass filter applied to the signals. The
default is a high-pass filter with a cutoff frequency of 5 Hz.
n_jobs : int
Number of jobs that can be used for preparing (filtering) the data.
random_state : int | None
State to seed the random number generator.
Return
------
X : ndarray, shape (n_trials, n_channels, n_times)
Signals loaded from HCP.
info : list of mne.Info
List of the info related to each signal.
"""
if data_type == "rest" and epoch is not None:
raise ValueError("epoch != None is not valid with resting-state data.")
rng = check_random_state(random_state)
mne.set_log_level(30)
db = get_all_records()
records = [(subject, run_index)
for subject, runs in db[data_type].items()
for run_index in runs]
X, info = [], []
records = rng.permutation(records)[:n_trials]
for i, (subject, run_index) in enumerate(records):
print("\rLoading HCP subjects: {:7.2%}".format(i / n_trials),
end='', flush=True)
X_n, info_n = load_one_record(
data_type, subject, int(run_index), sfreq=sfreq, epoch=epoch,
filter_params=filter_params, n_jobs=n_jobs)
X += [X_n]
info += [info_n]
print("\rLoading HCP subjects: done ")
X = make_array(X, equalize=equalize)
X /= np.std(X)
return X, info
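# Hedged usage sketch (requires the HCP files under HCP_DIR):
#   X, info = load_data(n_trials=5, data_type='rest', sfreq=150, n_jobs=4)
#   # X.shape -> (5, n_channels, n_times), zero-padded to the longest record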
def data_generator(n_trials=10, data_type='rest', sfreq=150, epoch=None,
filter_params=[5., None], equalize="zeropad", n_jobs=1,
random_state=None):
"""Generator loading subjects from the HCP dataset for multiCSC
Parameters
----------
n_trials : int
Number of recordings that are loaded.
data_type : str
Type of recordings loaded. Should be in {'rest', 'task_working_memory',
'task_motor', 'task_story_math', 'noise_empty_room', 'noise_subject'}.
sfreq : float
Sampling frequency of the signal. The data are resampled to match it.
epoch : tuple or None
If set to a tuple, extract epochs from the raw data, using
t_min=epoch[0] and t_max=epoch[1]. Else, use the raw signal, divided
in n_splits chunks.
filter_params : tuple
Frequency cutoffs for a band-pass filter applied to the signals. The
default is a high-pass filter with a cutoff frequency of 5 Hz.
n_jobs : int
Number of jobs that can be used for preparing (filtering) the data.
random_state : int | None
State to seed the random number generator.
Yields
------
X : ndarray, shape (n_channels, n_times)
Signal loaded from HCP, normalized by its standard deviation.
info : mne.Info
Info related to this signal.
"""
if data_type == "rest" and epoch is not None:
raise ValueError("epoch != None is not valid with resting-state data.")
rng = check_random_state(random_state)
mne.set_log_level(30)
db = get_all_records()
records = [(subject, run_index)
for subject, runs in db[data_type].items()
for run_index in runs]
records = rng.permutation(records)[:n_trials]
for i, (subject, run_index) in enumerate(records):
try:
X_n, info_n = load_one_record(
data_type, subject, int(run_index), sfreq=sfreq, epoch=epoch,
filter_params=filter_params, n_jobs=n_jobs)
X_n /= X_n.std()
yield X_n, info_n
except UnicodeDecodeError:
print("failed to load {}-{}-{}"
.format(subject, data_type, run_index))
def make_array(X, equalize='zeropad'):
""""""
x_shape = np.array([x.shape for x in X])
assert np.all(x_shape[..., :-1] == x_shape[0, ..., :-1])
if equalize == "crop":
L = x_shape.min(axis=0)[-1]
X = np.array([x[..., :L] for x in X])
elif equalize == "zeropad":
X_shape = tuple(x_shape.max(axis=0))
X_shape, L = X_shape[:-1], X_shape[-1]
X = np.array([
np.concatenate([x, np.zeros(X_shape + (L - x.shape[-1], ))],
axis=-1) for x in X
])
else:
raise ValueError("The equalize parameter should be in "
f"{'crop', 'zeropad'}, got '{equalize}'.")
return X
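# Minimal sketch (hypothetical helper using dummy data instead of HCP
# recordings): make_array equalizes trial lengths by cropping or zero-padding.
def _demo_make_array():
    X = [np.ones((2, 5)), np.ones((2, 3))]
    assert make_array(X, equalize="zeropad").shape == (2, 2, 5)  # pad to longest
    assert make_array(X, equalize="crop").shape == (2, 2, 3)     # crop to shortest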
| |
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `station_plot` module."""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from metpy.plots import (current_weather, high_clouds, nws_layout, simple_layout, sky_cover,
StationPlot, StationPlotLayout)
from metpy.units import units
@pytest.mark.mpl_image_compare(tolerance=2.444, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_stationplot_api():
"""Test the StationPlot API."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=16)
sp.plot_barb([20, 0], [0, -50])
sp.plot_text('E', ['KOKC', 'ICT'], color='blue')
sp.plot_parameter('NW', [10.5, 15] * units.degC, color='red')
sp.plot_symbol('S', [5, 7], high_clouds, color='green')
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
@pytest.mark.mpl_image_compare(tolerance=1.976, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_stationplot_clipping():
"""Test the that clipping can be enabled as a default parameter."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=16, clip_on=True)
sp.plot_barb([20, 0], [0, -50])
sp.plot_text('E', ['KOKC', 'ICT'], color='blue')
sp.plot_parameter('NW', [10.5, 15] * units.degC, color='red')
sp.plot_symbol('S', [5, 7], high_clouds, color='green')
sp.ax.set_xlim(1, 5)
sp.ax.set_ylim(1.75, 4.25)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.25, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_station_plot_replace():
"""Test that locations are properly replaced."""
fig = plt.figure(figsize=(3, 3))
# testing data
x = np.array([1])
y = np.array([1])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=16)
sp.plot_barb([20], [0])
sp.plot_barb([5], [0])
sp.plot_parameter('NW', [10.5], color='red')
sp.plot_parameter('NW', [20], color='blue')
sp.ax.set_xlim(-3, 3)
sp.ax.set_ylim(-3, 3)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.25, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_station_plot_locations():
"""Test that locations are properly replaced."""
fig = plt.figure(figsize=(3, 3))
locations = ['C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N2', 'NNE', 'ENE', 'E2',
'ESE', 'SSE', 'S2', 'SSW', 'WSW', 'W2', 'WNW', 'NNW']
x_pos = np.array([0])
y_pos = np.array([0])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x_pos, y_pos, fontsize=8, spacing=24)
for loc in locations:
sp.plot_text(loc, [loc])
sp.ax.set_xlim(-2, 2)
sp.ax.set_ylim(-2, 2)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.00413, savefig_kwargs={'dpi': 300},
remove_text=True)
def test_stationlayout_api():
"""Test the StationPlot API."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
data = {'temp': np.array([33., 212.]) * units.degF, 'u': np.array([2, 0]) * units.knots,
'v': np.array([0, 5]) * units.knots, 'stid': ['KDEN', 'KSHV'], 'cover': [3, 8]}
# Set up the layout
layout = StationPlotLayout()
layout.add_barb('u', 'v', units='knots')
layout.add_value('NW', 'temp', fmt='0.1f', units=units.degC, color='darkred')
layout.add_symbol('C', 'cover', sky_cover, color='magenta')
layout.add_text((0, 2), 'stid', color='darkgrey')
layout.add_value('NE', 'dewpt', color='green') # This should be ignored
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=12)
layout.plot(sp, data)
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
def test_station_layout_odd_data():
"""Test more corner cases with data passed in."""
fig = plt.figure(figsize=(9, 9))
# Set up test layout
layout = StationPlotLayout()
layout.add_barb('u', 'v')
layout.add_value('W', 'temperature', units='degF')
# Now only use data without wind and no units
data = {'temperature': [25.]}
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), [1], [2], fontsize=12)
layout.plot(sp, data)
assert True
def test_station_layout_replace():
"""Test that layout locations are replaced."""
layout = StationPlotLayout()
layout.add_text('E', 'temperature')
layout.add_value('E', 'dewpoint')
assert 'E' in layout
assert layout['E'][0] is StationPlotLayout.PlotTypes.value
assert layout['E'][1] == 'dewpoint'
def test_station_layout_names():
"""Test getting station layout names."""
layout = StationPlotLayout()
layout.add_barb('u', 'v')
layout.add_text('E', 'stid')
layout.add_value('W', 'temp')
layout.add_symbol('C', 'cover', lambda x: x)
assert sorted(layout.names()) == ['cover', 'stid', 'temp', 'u', 'v']
@pytest.mark.mpl_image_compare(tolerance=0, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_simple_layout():
"""Test metpy's simple layout for station plots."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
data = {'air_temperature': np.array([33., 212.]) * units.degF,
'dew_point_temperature': np.array([28., 80.]) * units.degF,
'air_pressure_at_sea_level': np.array([29.92, 28.00]) * units.inHg,
'eastward_wind': np.array([2, 0]) * units.knots,
'northward_wind': np.array([0, 5]) * units.knots, 'cloud_coverage': [3, 8],
'current_wx1_symbol': [65, 75], 'unused': [1, 2]}
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=12)
simple_layout.plot(sp, data)
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.1848, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_nws_layout():
"""Test metpy's NWS layout for station plots."""
fig = plt.figure(figsize=(3, 3))
# testing data
x = np.array([1])
y = np.array([2])
data = {'air_temperature': np.array([77]) * units.degF,
'dew_point_temperature': np.array([71]) * units.degF,
'air_pressure_at_sea_level': np.array([999.8]) * units('mbar'),
'eastward_wind': np.array([15.]) * units.knots,
'northward_wind': np.array([15.]) * units.knots, 'cloud_coverage': [7],
'current_wx1_symbol': [80], 'high_cloud_type': [1], 'medium_cloud_type': [3],
'low_cloud_type': [2], 'visibility_in_air': np.array([5.]) * units.mile,
'tendency_of_air_pressure': np.array([-0.3]) * units('mbar'),
'tendency_of_air_pressure_symbol': [8]}
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=12, spacing=16)
nws_layout.plot(sp, data)
sp.ax.set_xlim(0, 3)
sp.ax.set_ylim(0, 3)
return fig
@pytest.mark.mpl_image_compare(tolerance=1.05, remove_text=True)
def test_plot_text_fontsize():
"""Test changing fontsize in plot_text."""
fig = plt.figure(figsize=(3, 3))
ax = plt.subplot(1, 1, 1)
# testing data
x = np.array([1])
y = np.array([2])
# Make the plot
sp = StationPlot(ax, x, y, fontsize=36)
sp.plot_text('NW', ['72'], fontsize=24)
sp.plot_text('SW', ['60'], fontsize=4)
sp.ax.set_xlim(0, 3)
sp.ax.set_ylim(0, 3)
return fig
@pytest.mark.mpl_image_compare(tolerance=1.05, remove_text=True)
def test_plot_symbol_fontsize():
"""Test changing fontsize in plotting of symbols."""
fig = plt.figure(figsize=(3, 3))
ax = plt.subplot(1, 1, 1)
sp = StationPlot(ax, [0], [0], fontsize=8, spacing=32)
sp.plot_symbol('E', [92], current_weather)
sp.plot_symbol('W', [96], current_weather, fontsize=100)
return fig
def test_layout_str():
"""Test layout string representation."""
layout = StationPlotLayout()
layout.add_barb('u', 'v')
layout.add_text('E', 'stid')
layout.add_value('W', 'temp')
layout.add_symbol('C', 'cover', lambda x: x)
assert str(layout) == ('{C: (symbol, cover, ...), E: (text, stid, ...), '
"W: (value, temp, ...), barb: (barb, ('u', 'v'), ...)}")
@pytest.fixture
def wind_plot():
"""Create southerly wind test data."""
v = np.full((5, 5), 10, dtype=np.float64)
u = np.zeros_like(v)
x, y = np.meshgrid(np.linspace(-120, -60, 5), np.linspace(25, 50, 5))
return u, v, x, y
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.01)
def test_barb_projection(wind_plot, ccrs):
"""Test that barbs are properly projected (#598)."""
u, v, x, y = wind_plot
# Plot and check barbs (they should align with grid lines)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.gridlines(xlocs=[-120, -105, -90, -75, -60], ylocs=np.arange(24, 55, 6))
sp = StationPlot(ax, x, y, transform=ccrs.PlateCarree())
sp.plot_barb(u, v)
return fig
@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.01)
def test_arrow_projection(wind_plot, ccrs):
"""Test that arrows are properly projected."""
u, v, x, y = wind_plot
# Plot and check barbs (they should align with grid lines)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
ax.gridlines(xlocs=[-120, -105, -90, -75, -60], ylocs=np.arange(24, 55, 6))
sp = StationPlot(ax, x, y, transform=ccrs.PlateCarree())
sp.plot_arrow(u, v)
sp.plot_arrow(u, v) # plot_arrow used twice to hit removal if statement
return fig
@pytest.fixture
def wind_projection_list():
"""Create wind lists for testing."""
lat = [38.22, 38.18, 38.25]
lon = [-85.76, -85.86, -85.77]
u = [1.89778964, -3.83776523, 3.64147732] * units('m/s')
v = [1.93480072, 1.31000184, 1.36075552] * units('m/s')
return lat, lon, u, v
def test_barb_projection_list(wind_projection_list):
"""Test that barbs will be projected when lat/lon lists are provided."""
lat, lon, u, v = wind_projection_list
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, lon, lat)
stnplot.plot_barb(u, v)
assert stnplot.barbs
def test_arrow_projection_list(wind_projection_list):
"""Test that arrows will be projected when lat/lon lists are provided."""
lat, lon, u, v = wind_projection_list
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, lon, lat)
stnplot.plot_arrow(u, v)
assert stnplot.arrows
@pytest.fixture
def barbs_units():
"""Create barbs with units for testing."""
x_pos = np.array([0])
y_pos = np.array([0])
u_wind = np.array([3.63767155210412]) * units('m/s')
v_wind = np.array([3.63767155210412]) * units('m/s')
return x_pos, y_pos, u_wind, v_wind
@pytest.mark.mpl_image_compare(tolerance=0.0048, remove_text=True)
def test_barb_unit_conversion(barbs_units):
"""Test that barbs units can be converted at plot time (#737)."""
x_pos, y_pos, u_wind, v_wind = barbs_units
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, x_pos, y_pos)
stnplot.plot_barb(u_wind, v_wind, plot_units='knots')
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.0048, remove_text=True)
def test_arrow_unit_conversion(barbs_units):
"""Test that arrow units can be converted at plot time (#737)."""
x_pos, y_pos, u_wind, v_wind = barbs_units
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, x_pos, y_pos)
stnplot.plot_arrow(u_wind, v_wind, plot_units='knots')
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.0048, remove_text=True)
def test_barb_no_default_unit_conversion():
"""Test that barbs units are left alone by default (#737)."""
x_pos = np.array([0])
y_pos = np.array([0])
u_wind = np.array([3.63767155210412]) * units('m/s')
v_wind = np.array([3.63767155210412]) * units('m/s')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, x_pos, y_pos)
stnplot.plot_barb(u_wind, v_wind)
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
return fig
@pytest.mark.parametrize('u,v', [(np.array([3]) * units('m/s'), np.array([3])),
(np.array([3]), np.array([3]) * units('m/s'))])
def test_barb_unit_conversion_exception(u, v):
"""Test that errors are raise if unit conversion is requested on un-united data."""
x_pos = np.array([0])
y_pos = np.array([0])
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, x_pos, y_pos)
with pytest.raises(ValueError):
stnplot.plot_barb(u, v, plot_units='knots')
@pytest.mark.mpl_image_compare(tolerance=0.021, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_symbol_pandas_timeseries():
"""Test the usage of Pandas DatetimeIndex as a valid `x` input into StationPlot."""
pd.plotting.register_matplotlib_converters()
rng = pd.date_range('12/1/2017', periods=5, freq='D')
sc = [1, 2, 3, 4, 5]
ts = pd.Series(sc, index=rng)
fig, ax = plt.subplots()
y = np.ones(len(ts.index))
stationplot = StationPlot(ax, ts.index, y, fontsize=12)
stationplot.plot_symbol('C', ts, sky_cover)
ax.xaxis.set_major_locator(matplotlib.dates.DayLocator())
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%-d'))
return fig
@pytest.mark.mpl_image_compare(tolerance=2.444, savefig_kwargs={'dpi': 300}, remove_text=True)
def test_stationplot_unit_conversion():
"""Test the StationPlot API."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=16)
sp.plot_barb([20, 0], [0, -50])
sp.plot_text('E', ['KOKC', 'ICT'], color='blue')
sp.plot_parameter('NW', [10.5, 15] * units.degC, plot_units='degF', color='red')
sp.plot_symbol('S', [5, 7], high_clouds, color='green')
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
def test_scalar_unit_conversion_exception():
"""Test that errors are raise if unit conversion is requested on un-united data."""
x_pos = np.array([0])
y_pos = np.array([0])
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
stnplot = StationPlot(ax, x_pos, y_pos)
with pytest.raises(ValueError):
stnplot.plot_parameter('C', 50, plot_units='degC')
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, f, ci, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
return _lstm_ops_so.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
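# Reference sketch (hypothetical helper, illustration only): a pure-NumPy
# version of the equations documented above, with peepholes and cell clipping
# omitted; the fused kernel's internal gate ordering may differ.
def _numpy_lstm_block_cell_reference(x, cs_prev, h_prev, w, b, forget_bias=1.0):
    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    xh = np.concatenate([x, h_prev], axis=1)
    i, f, ci, o = np.split(np.dot(xh, w) + b, 4, axis=1)
    i = sigmoid(i)
    f = sigmoid(f + forget_bias)
    ci = np.tanh(ci)
    cs = ci * i + cs_prev * f
    o = sigmoid(o)
    co = np.tanh(cs)
    h = co * o
    return i, cs, f, o, ci, co, h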
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `3`.
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 // 4  # integer division: b stacks the biases of the 4 gates
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = _lstm_ops_so.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.pack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
name=name,
use_peephole=use_peephole)
return array_ops.unpack(i), array_ops.unpack(cs), array_ops.unpack(
f), array_ops.unpack(o), array_ops.unpack(ci), array_ops.unpack(
co), array_ops.unpack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
ops.RegisterShape("LSTMBlockCell")(common_shapes.call_cpp_shape_fn)
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wco, wcf, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = _lstm_ops_so.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat(1, [x, h_prev])
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
ops.RegisterShape("LSTMBlockCellGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("BlockLSTM")(common_shapes.call_cpp_shape_fn)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wco, wcf, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad,
b_grad) = _lstm_ops_so.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wco,
wcf,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad,
wcf_grad, b_grad]
ops.RegisterShape("BlockLSTMGrad")(common_shapes.call_cpp_shape_fn)
class LSTMBlockCell(rnn_cell.RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
Unlike `rnn_cell.LSTMCell`, this is a monolithic op and should be much faster.
The weight and bias matrices should be compatible as long as the variable
scope matches, and you use `use_compatible_names=True`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_peephole=False,
use_compatible_names=False):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
use_peephole: Whether to use peephole connections or not.
use_compatible_names: If True, use the same variable naming as
rnn_cell.LSTMCell
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
if use_compatible_names:
self._names = {
"W": "W_0",
"b": "B",
"wci": "W_I_diag",
"wco": "W_O_diag",
"wcf": "W_F_diag",
"scope": "LSTMCell"
}
else:
self._names = {
"W": "W",
"b": "b",
"wci": "wci",
"wco": "wco",
"wcf": "wcf",
"scope": "LSTMBlockCell"
}
@property
def state_size(self):
return (self._num_units,) * 2
@property
def output_size(self):
return self._num_units
def __call__(self, x, states_prev, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or self._names["scope"]):
x_shape = x.get_shape().with_rank(2)
if not x_shape[1]:
raise ValueError("Expecting x_shape[1] to be sets: %s" % str(x_shape))
if len(states_prev) != 2:
raise ValueError("Expecting states_prev to be a tuple with length 2.")
input_size = x_shape[1]
w = vs.get_variable(self._names["W"], [input_size + self._num_units,
self._num_units * 4])
b = vs.get_variable(
self._names["b"], [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
wci = vs.get_variable(self._names["wci"], [self._num_units])
wco = vs.get_variable(self._names["wco"], [self._num_units])
wcf = vs.get_variable(self._names["wcf"], [self._num_units])
else:
wci = wco = wcf = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = states_prev
(_, cs, _, _, _, _, h) = _lstm_block_cell(
x,
cs_prev,
h_prev,
w,
b,
wci=wci,
wco=wco,
wcf=wcf,
forget_bias=self._forget_bias,
use_peephole=self._use_peephole)
return (h, (cs, h))
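# Hedged usage sketch (comments only; assumes the TF 0.x contrib-era API this
# module targets): LSTMBlockCell is used like any other rnn_cell with a
# (c, h) state tuple, e.g.
#
#   cell = LSTMBlockCell(num_units=128, use_peephole=True)
#   outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)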
class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
"""This is a helper class that provides housekeeping for LSTM cells.
This may be useful for alternative LSTM and similar types of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
scope: `VariableScope` for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
with vs.variable_scope(scope or type(self).__name__):
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.pack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" %
inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError(
"Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.pack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(inputs, initial_cell_state,
initial_output, dtype,
sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(
sequence_length, time_len, dtype=dtype), [1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
# correctly, since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
0, [array_ops.expand_dims(initial_cell_state, [0]), cell_states])
mod_outputs = array_ops.concat(
0, [array_ops.expand_dims(initial_output, [0]), outputs])
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unpack(outputs)
return outputs, (final_cell_state, final_output)
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation, that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `rnn_cell.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Defaults to `3`.
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip
self._use_peephole = use_peephole
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
input_size = inputs_shape[2].value
w = vs.get_variable(
"W_0", [input_size + self._num_units, self._num_units * 4], dtype=dtype)
b = vs.get_variable(
"B", [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0),
dtype=dtype)
if self._use_peephole:
wci = vs.get_variable("W_I_diag", [self._num_units], dtype=dtype)
wco = vs.get_variable("W_O_diag", [self._num_units], dtype=dtype)
wcf = vs.get_variable("W_F_diag", [self._num_units], dtype=dtype)
else:
wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = time_len
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = _lstm_ops_so.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
| |
import codecs
import copy
import hashlib
import json
import os
import requests
import numpy as numpy
import pandas as pd
from datetime import date
from slugify import slugify
from time import sleep
class OpenContextAPI():
''' Interacts with the Open Context API
to get lists of records for analysis
See API documentation here:
https://opencontext.org/about/services
'''
# -----------------------------------------------------------------
# NOTE: Open Context provides JSON(-LD) responses to searches and
# queries. This class interacts with the Open Context JSON API to
# obtain data for independent analysis and visualization.
#
# The Open Context JSON-LD service can (hopefully) be used as
# 'linked-data' (read and modeled as RDF triples). However, this
# class simply treats the Open Context API as a JSON service and
# does not treat the data as RDF / graph data.
#
# Open Context's JSON-LD service is currently slow, a situation that
# will hopefully be resolved by fall of 2020. Caching requests is
# therefore an important aspect of this class. All requests for
# JSON data get cached as files on the local file system.
#
# -----------------------------------------------------------------
# The name of the directory for file caching JSON data from Open Context
API_CACHE_DIR = 'oc-api-cache'
RECS_PER_REQUEST = 200 # number of records to retrieve per request
# Open Context allows record attributes to have multiple values. If
# FLATTEN_ATTRIBUTES = True, these attributes are returned as a
# single value, with multiple values combined with a delimiter.
FLATTEN_ATTRIBUTES = False
RESPONSE_TYPE_LIST = ['metadata', 'uri-meta']
SLEEP_TIME = 0.25 # seconds to pause between requests
TEXT_FACET_OPTION_KEYS = [
'oc-api:has-id-options',
'oc-api:has-text-options',
]
NON_TEXT_OPTION_KEYS = [
# This is currently implemented in Open Context's API but
# will be deprecated.
'oc-api:has-numeric-options',
        # NOTE: We're in the process of updating Open Context's API;
        # the following will be supported in the future.
'oc-api:has-boolean-options',
'oc-api:has-integer-options',
'oc-api:has-float-options',
'oc-api:has-date-options',
]
FACET_OPTIONS_KEYS = TEXT_FACET_OPTION_KEYS + NON_TEXT_OPTION_KEYS
# Biological taxonomies are in deep hierarchies. Do not include
# these taxonomies when looking for standard attributes. This
# list has prefixes for slugs in these biological taxonomies
# to identify slugs to NOT consider as attribute slugs.
NON_ATTRIBUTE_SLUG_PREFIXES = [
'gbif-', # See: https://gbif.org
'eol-p-', # See: https://eol.org
]
VON_DEN_DRIESCH_PROP = 'oc-zoo-anatomical-meas---oc-zoo-von-den-driesch-bone-meas'
# Open Context allows record attributes to have multiple values
# which is necessary because that's how data contributors often
# describe their observations. But that's a pain for analysis,
# so below we list options for handling multiple values for
# attributes
MULTI_VALUE_ATTRIBUTE_HANDLING = [
'first', # Choose the first value
        'last', # Choose the last value
        'json', # Output a multivalue list as a JSON formatted string
'concat', # Concatenate with a delimiter (defaults to '; ')
'column_val', # Add values to column names and True for present
]
STANDARD_MULTI_VALUE_HANDLING = {
        # Bone fusion is best handled by putting the few fusion options
        # in the column names, with True indicating the presence of a value.
'Has fusion character': 'column_val',
}
# For cosmetic, usability reasons it's good to have some consistent
# order for columns that will be expected for all Open Context
    # search / query result records. This lists the first columns
# in their expected order.
DEFAULT_FIRST_DF_COLUMNS = [
'uri',
'citation uri',
'label',
'item category',
'project label',
'project uri',
'published',
'updated',
'latitude',
'longitude',
'early bce/ce',
'late bce/ce',
'context uri',
]
INFER_DATATYPE_MAPPINGS = {
'floating': 'float64',
'decimal': 'float64',
'integer': 'int',
'datetime': 'datetime64',
'boolean': 'bool',
}
    # Template for column names for columns at different levels
# of depth.
CONTEXT_LEVEL_COLUMN_TEMPLATE = 'Context ({})'
def __init__(self):
self.recs_per_request = self.RECS_PER_REQUEST
self.sleep_time = self.SLEEP_TIME
self.flatten_attributes = self.FLATTEN_ATTRIBUTES
self.response_types = self.RESPONSE_TYPE_LIST
        # The cache file prefix defaults to a string
        # representation of today's date.
self.cache_file_prefix = date.today().strftime('%Y-%m-%d')
# Different search results can have different levels of depth
# for describing context.
self.max_result_context_depth = 0
self.multi_value_handle_non_number = 'concat'
self.multi_value_handle_number = 'first'
self.multi_value_delim = '; '
self.multi_value_handle_keyed_attribs = self.STANDARD_MULTI_VALUE_HANDLING.copy()
def set_cache_file_prefix(self, text_for_prefix):
'''Makes a 'slug-ified' cache file prefix'''
self.cache_file_prefix = slugify(text_for_prefix)
def _modify_get_params_by_url_check(self, url, params):
'''Makes an extra params dict for parameters NOT already in a URL'''
# Add the parameters that are not actually already in the
# url.
if not params:
return {}
extra_params = {
k:v for k,v in params.items() if (k + '=') not in url
}
if params.get('prop') and not extra_params.get('prop'):
# The param 'prop' is special, since we can have more than
# one of these in a url.
if ('prop=' + params['prop']) not in url:
# This particular prop and value is not already in the
# url, so it's OK to add to the extra_params dict.
extra_params['prop'] = params['prop']
return extra_params
def _make_url_cache_file_name(
self,
url,
extra_params={},
extension='.json'
):
'''Makes a cache file name for a url'''
if '#' in url:
# Everything after a '#' can be discarded, the # portion
# of a url is only important for web-browser behaviors, and
# does not matter for requests to the Open Context server.
url = url.split('#')[0]
extra_suffix = ''
if extra_params:
extra_params = self._modify_get_params_by_url_check(
url,
extra_params
)
            # We have some extra parameters, so add a suffix to the URL
            # by dumping these as a string. This is not meant to make a
            # real URL, just to make a cache-key that captures all
# the parameters.
extra_suffix = str(extra_params)
hash_obj = hashlib.sha1()
hash_obj.update((url + extra_suffix).encode('utf-8'))
hash_url = hash_obj.hexdigest()
cache_file_name = (
self.cache_file_prefix
+ '-'
+ hash_url
+ extension
)
return cache_file_name
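    # Illustration (added comment): with the default date-based prefix, a
    # cache file name looks like '2020-07-01-<sha1 of url + extra params>.json',
    # so repeating the same request on the same day re-uses the cached file.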
def clear_api_cache(self, keep_prefix=True):
'''Cleans old data from the API cache'''
repo_path = os.path.dirname(os.path.abspath(os.getcwd()))
cache_dir = os.path.join(
repo_path, self.API_CACHE_DIR
)
if not os.path.exists(cache_dir):
# No cache directory exists, so nothing to erase.
return None
# Iterate through the files in the cache_dir, skip those
# that we want to keep and delete the rest.
for f in os.listdir(cache_dir):
file_path = os.path.join(cache_dir, f)
if not os.path.isfile(file_path):
# Not a file, so skip
continue
if keep_prefix and f.startswith(self.cache_file_prefix):
# We skip because we're keeping files that start with
# the current self.cache_file_prefix
continue
os.remove(file_path)
def _get_parse_cached_json(self, cache_file_name):
'''Returns an object parsed from cached json'''
repo_path = os.path.dirname(os.path.abspath(os.getcwd()))
path_file = os.path.join(
repo_path, self.API_CACHE_DIR, cache_file_name
)
try:
obj_from_json = json.load(
codecs.open(path_file, 'r','utf-8-sig')
)
except:
obj_from_json = None
return obj_from_json
def _cache_json(self, cache_file_name, obj_to_json):
'''Caches an object as json to a cache_file_name'''
repo_path = os.path.dirname(os.path.abspath(os.getcwd()))
cache_dir = os.path.join(
repo_path, self.API_CACHE_DIR
)
if not os.path.exists(cache_dir):
# Make sure we actually have the cache directory.
os.makedirs(cache_dir)
path_file = os.path.join(
cache_dir, cache_file_name
)
json_output = json.dumps(
obj_to_json,
indent=4,
ensure_ascii=False
)
file = codecs.open(path_file, 'w', 'utf-8')
file.write(json_output)
file.close()
def get_cache_url(self, url, extra_params={}, print_url=True):
'''Gets and caches JSON data from an Open Context URL'''
cache_file_name = self._make_url_cache_file_name(
url,
extra_params=extra_params
)
obj_from_json = self._get_parse_cached_json(cache_file_name)
if obj_from_json:
# We got recent, readable JSON from the cache. No need to
# fetch from Open Context.
return obj_from_json
# Set the request headers to ask Open Context to return
# a JSON representation.
headers = {
'accept': 'application/json'
}
extra_params = self._modify_get_params_by_url_check(
url,
extra_params
)
try:
sleep(self.sleep_time) # pause to not overwhelm the API
r = requests.get(url, params=extra_params, headers=headers)
r.raise_for_status()
if print_url:
print('GET Success for JSON data from: {}'.format(r.url))
obj_from_oc = r.json()
except:
# Everything stops and breaks if we get here.
obj_from_oc = None
if not obj_from_oc:
            raise RuntimeError('Request failed with URL: {}'.format(url))
self._cache_json(cache_file_name, obj_from_oc)
return obj_from_oc
def get_standard_attributes(
self,
url,
add_von_den_driesch_bone_measures=False
):
'''Gets the 'standard' attributes from a search URL
'''
# -------------------------------------------------------------
# NOTE: Open Context records often have 'standard' attributes,
        # meaning attributes that are used with data from multiple projects.
# These attributes are typically identified by a URI so can be
# considered linked data.
# -------------------------------------------------------------
extra_params = {}
if add_von_den_driesch_bone_measures:
# Standard Von Den Driesch bone measurement attributes are
# a little buried in Open Context's API. If this argument
# is True, we add a parameter to the GET request to make
# sure that we have it.
extra_params['prop'] = self.VON_DEN_DRIESCH_PROP
json_data = self.get_cache_url(url, extra_params=extra_params)
if not json_data:
            # Something went wrong, so skip out.
return None
attribute_slug_labels = []
total_found = json_data.get('totalResults', 0)
if total_found < 1:
return attribute_slug_labels
for facet in json_data.get('oc-api:has-facets', []):
for check_option in self.FACET_OPTIONS_KEYS:
if not check_option in facet:
# Skip, the facet does not have the current
# check option key.
continue
def_uri = facet.get('rdfs:isDefinedBy')
if not def_uri:
# Skip. The facet does not have a URI for a
# definition.
continue
# Default to not adding attributes.
add_attributes = False
if (not def_uri.startswith('oc-gen:')
and not def_uri.startswith('oc-api:')
and not def_uri.startswith(
'http://opencontext.org'
)):
# This is defined outside of Open Context, so
# is a 'standard'.
add_attributes = True
if def_uri.startswith(
'oc-api:facet-prop-ld'
):
                    # This is for facets for linked data defined
                    # 'standards' attributes. These are OK to
                    # include as attributes.
add_attributes = True
if def_uri.startswith(
'http://opencontext.org/vocabularies/open-context-zooarch/'
):
# Open Context has also defined some standard
# attributes for zooarchaeological data.
add_attributes = True
if not add_attributes:
# Skip the rest, we're not adding any
# attributes in this loop.
continue
for f_opt in facet[check_option]:
if not f_opt.get('slug') or not f_opt.get('label'):
continue
skip_slug = False
for skip_prefix in self.NON_ATTRIBUTE_SLUG_PREFIXES:
if f_opt['slug'].startswith(skip_prefix):
skip_slug = True
if skip_slug:
# The slug starts with prefix that identifies
# non-attribute slugs. So don't add to the
# attribute list and skip.
continue
# Make a tuple of the slug and label
slug_label = (
f_opt['slug'],
f_opt['label'],
)
if slug_label in attribute_slug_labels:
# Skip, we already have this.
continue
attribute_slug_labels.append(
slug_label
)
# Return the list of slug_label tuples.
return attribute_slug_labels
def get_common_attributes(self, url, min_portion=0.2):
'''Gets commonly used attributes from a search URL
'''
# -------------------------------------------------------------
# NOTE: Open Context records can have many different
# descriptive attributes. This gets a list of attribute
# slug label tuples for descriptive attributes used in
# a proportion of the results records above a given threshold.
# -------------------------------------------------------------
json_data = self.get_cache_url(url)
if not json_data:
            # Something went wrong, so skip out.
return None
attribute_slug_labels = []
total_found = json_data.get('totalResults', 0)
if total_found < 1:
return attribute_slug_labels
# Minimum threshold of counts to accept an attribute
# as common enough.
threshold = (total_found * min_portion)
for facet in json_data.get('oc-api:has-facets', []):
for check_option in self.FACET_OPTIONS_KEYS:
if not check_option in facet:
# Skip, the facet does not have the current
# check option key.
continue
def_uri = facet.get('rdfs:isDefinedBy', '')
if not (def_uri == 'oc-api:facet-prop-var'
or def_uri.startswith('http://opencontext.org/predicates/')):
# This is not an Open Context project defined attribute
continue
for f_opt in facet[check_option]:
if not f_opt.get('slug') or not f_opt.get('label'):
# We are missing some needed attributes.
continue
if not f_opt.get('rdfs:isDefinedBy', '').startswith(
'http://opencontext.org/predicates/'):
                        # This is not a predicate (attribute),
                        # so don't add and skip.
continue
if f_opt.get('count', 0) < threshold:
# The count for this predicate is below the
# threshold for acceptance as 'common'.
continue
# Make a tuple of the slug and label
slug_label = (
f_opt['slug'],
f_opt['label'],
)
if slug_label in attribute_slug_labels:
# Skip, we already have this.
continue
attribute_slug_labels.append(
slug_label
)
# Return the list of slug_label tuples.
return attribute_slug_labels
def _handle_multi_values(self, handle, key, values, record):
"""Handles multi-values according to configuration"""
if handle not in self.MULTI_VALUE_ATTRIBUTE_HANDLING:
            raise ValueError(
                'Unknown multi-value handling: {}, must be one of: {}'.format(
                    handle,
                    str(self.MULTI_VALUE_ATTRIBUTE_HANDLING),
                )
            )
if not isinstance(values, list):
values = [values]
if handle == 'first':
record[key] = values[0]
elif handle == 'last':
record[key] = values[-1]
elif handle == 'json':
record[key] = json.dumps(values, ensure_ascii=False)
elif handle == 'concat':
record[key] = self.multi_value_delim.join([str(v) for v in values])
elif handle == 'column_val':
for val in values:
new_key = '{} :: {}'.format(key, val)
record[new_key] = True
return record
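    # Illustration (added comment, example values are hypothetical). With
    # the default '; ' delimiter:
    #   _handle_multi_values('concat', 'Color', ['red', 'blue'], {})
    #     -> {'Color': 'red; blue'}
    #   _handle_multi_values('column_val', 'Has fusion character', ['fused'], {})
    #     -> {'Has fusion character :: fused': True}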
def _process_record_attributes(self, raw_record):
"""Process a raw record to format for easy dataframe use
:param dict raw_record: A dictionary object of a search/query
result returned from Open Context's JSON API.
"""
record = {}
for key, value in raw_record.items():
if key == 'context label':
# Contexts are only single value attributes,
# so don't worry about multi-values. However,
# we need to split context paths into multiple
# columns to make analysis easier.
contexts = value.split('/')
if len(contexts) > self.max_result_context_depth:
self.max_result_context_depth = len(contexts)
for i, context in enumerate(contexts, 1):
record[
self.CONTEXT_LEVEL_COLUMN_TEMPLATE.format(i)
] = context
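                # Illustration (added comment, path is hypothetical): a
                # context label like 'Turkey/Domuztepe/Operation 1' becomes
                # Context (1) = 'Turkey', Context (2) = 'Domuztepe',
                # Context (3) = 'Operation 1'.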
# Now continue in the loop so we skip everything
# else below.
continue
if self.multi_value_handle_keyed_attribs.get(key):
# This specific attribute key has a multi-value configuration
record = self._handle_multi_values(
handle=self.multi_value_handle_keyed_attribs.get(key),
key=key,
values=value,
record=record
)
# Now continue in the loop so we skip everything
# else below.
continue
if not isinstance(value, list):
# The simple, happy case of a single value for this
# attribute key
record[key] = value
# Now continue in the loop so we skip everything
# else below.
continue
# We have multiple values for this attribute, but no
# specific configuration for this attribute key. So
# first check if this is a number or not. Numbers versus
# non-number multiple values can have different configured
            # handling.
value_list = []
all_number = True
for val in value:
try:
num_val = float(val)
value_list.append(num_val)
except:
all_number = False
value_list.append(val)
if all_number:
handle = self.multi_value_handle_number
else:
handle = self.multi_value_handle_non_number
record = self._handle_multi_values(
handle=handle,
key=key,
values=value_list,
record=record
)
return record
def get_paged_json_records(self,
url,
attribute_slugs,
do_paging=True,
split_contexts=True
):
'''Gets records data from a URL, recursively get next page
'''
# Set some additional HTTP GET parameters to ask Open Context
        # for a certain number of rows, described by a comma separated
# list of attributes, including certain kinds of JSON in the
# response.
params = {}
params['rows'] = self.recs_per_request
if len(attribute_slugs):
params['attributes'] = ','.join(attribute_slugs)
if len(self.response_types):
params['response'] = ','.join(self.response_types)
if self.flatten_attributes:
params['flatten-attributes'] = 1
# Now make the request to Open Context or get a previously
# cached request saved on the local file system.
json_data = self.get_cache_url(
url,
extra_params=params,
print_url=False
)
if not json_data:
            # Something went wrong, so skip out.
return None
# This is for some progress feedback as this runs. The Open
# Context API is still slow (until we complete updates to it)
# so it's nice to get some periodic feedback that this
# function is still working and making progress.
last_rec = (
json_data.get('startIndex', 0)
+ json_data.get('itemsPerPage', 0)
)
if last_rec > json_data.get('totalResults'):
last_rec = json_data.get('totalResults')
print(
'Got records {} to {} of {} from: {}'.format(
(json_data.get('startIndex', 0) + 1),
last_rec,
json_data.get('totalResults'),
json_data.get('id'),
),
end="\r",
)
# Get the raw record results from the Open Context JSON
# response and do some processing to make them a little
# easier to use.
raw_records = json_data.get('oc-api:has-results', [])
records = []
for raw_record in raw_records:
record = self._process_record_attributes(
raw_record
)
records.append(record)
# Check to see if there's a 'next' url. That indicates we still
# can continue paging through all the results in this
# search / query.
next_url = json_data.get('next')
if do_paging and next_url:
# Recursively get the next page of results and add these
# result records to the list of records.
records += self.get_paged_json_records(
next_url,
attribute_slugs,
do_paging
)
return records
def _infer_set_dataframe_col_datatypes(self, df):
"""Infers and sets column datatypes for a dataframe"""
for col in df.columns.tolist():
d_type = pd.api.types.infer_dtype(df[col], skipna=True)
if not self.INFER_DATATYPE_MAPPINGS.get(d_type):
# We're not changing the data type of this column.
continue
if d_type == 'boolean':
df[col] = df[col].fillna(value=False)
df[col] = df[col].astype(
self.INFER_DATATYPE_MAPPINGS.get(
d_type
)
)
return df
def _reorder_dataframe_columns(self, df):
"""Reorders dataframe columns cosmetically"""
# Make a list of columns that will include all the
# contexts up to the maximum context depth for these
# records.
context_cols = [
self.CONTEXT_LEVEL_COLUMN_TEMPLATE.format(i)
for i in range(1, (self.max_result_context_depth + 1))
]
# Make a list of columns to order first, checking to
# make sure that they are actually present in the dataframe.
first_cols = [
col
for col in (self.DEFAULT_FIRST_DF_COLUMNS + context_cols)
if col in df.columns
]
other_cols = [
col
for col in df.columns.tolist()
if col not in first_cols
]
obj_col_counts = [
(col, len(df[col].unique().tolist()),)
for col in other_cols
if df[col].dtypes == 'object'
]
# Sort by the second element in the tuple (unique value counts)
obj_col_counts.sort(key=lambda tup: tup[1])
# Now just make a list of the column names, no counts.
obj_cols = [col for col, _ in obj_col_counts]
# Now gather the boolean value columns, sort them by name.
bool_cols = [
col
for col in other_cols
if df[col].dtypes == 'bool'
]
bool_cols.sort()
# The 'middle columns' are the count sorted object columns
# plus the name sorted boolean columns.
middle_cols = obj_cols + bool_cols
# The final columns are everything else, sorted by name.
final_cols = [col for col in other_cols if col not in middle_cols]
final_cols.sort()
return df[(first_cols + middle_cols + final_cols)]
def url_to_dataframe(self, url, attribute_slugs):
'''Makes a dataframe from Open Context search URL'''
self.max_result_context_depth = 0
records = self.get_paged_json_records(
url,
attribute_slugs,
do_paging=True
)
df = pd.DataFrame(records)
# Infer data types for the columns.
df = self._infer_set_dataframe_col_datatypes(df)
# NOTE: everything below is cosmetic, to order columns
# of the output dataframe predictably.
df = self._reorder_dataframe_columns(df)
return df
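# ---------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It shows one way the OpenContextAPI class above might be used
# to pull a search result into a dataframe. The search URL below is a
# hypothetical placeholder, not a documented Open Context query.
# ---------------------------------------------------------------------
if __name__ == '__main__':
    oc_api = OpenContextAPI()
    # Cache files for this session get a recognizable, slug-ified prefix.
    oc_api.set_cache_file_prefix('example query')
    search_url = 'https://opencontext.org/query/?q=example'  # placeholder
    # Gather both 'standard' (linked data) and commonly used attributes,
    # then request those attribute columns for every result record.
    slug_labels = (
        (oc_api.get_standard_attributes(search_url) or [])
        + (oc_api.get_common_attributes(search_url) or [])
    )
    attribute_slugs = [slug for slug, _ in slug_labels]
    df = oc_api.url_to_dataframe(search_url, attribute_slugs)
    print(df.head())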
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for rietveld.py."""
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.patches_data import GIT, RAW
import patch
import rietveld
def _api(files):
"""Mock a rietveld api request."""
return rietveld.json.dumps({'files': files})
def _file(
status, is_binary=False, num_chunks=1, chunk_id=789, property_changes=''):
"""Mock a file in a rietveld api request."""
return {
'status': status,
'is_binary': is_binary,
'num_chunks': num_chunks,
'id': chunk_id,
'property_changes': property_changes,
}
class BaseFixture(unittest.TestCase):
# Override.
TESTED_CLASS = Exception
def setUp(self):
super(BaseFixture, self).setUp()
# Access to a protected member XX of a client class
# pylint: disable=W0212
self.rietveld = self.TESTED_CLASS('url', 'email', 'password')
self.rietveld._send = self._rietveld_send
self.requests = []
def tearDown(self):
self.assertEqual([], self.requests)
super(BaseFixture, self).tearDown()
def _rietveld_send(self, url, *args, **kwargs):
self.assertTrue(self.requests, url)
request = self.requests.pop(0)
self.assertEqual(2, len(request))
self.assertEqual(url, request[0])
return request[1]
def _check_patch(self,
p,
filename,
diff,
source_filename=None,
is_binary=False,
is_delete=False,
is_git_diff=False,
is_new=False,
patchlevel=0,
svn_properties=None):
svn_properties = svn_properties or []
self.assertEqual(p.filename, filename)
self.assertEqual(p.source_filename, source_filename)
self.assertEqual(p.is_binary, is_binary)
self.assertEqual(p.is_delete, is_delete)
if hasattr(p, 'is_git_diff'):
self.assertEqual(p.is_git_diff, is_git_diff)
self.assertEqual(p.is_new, is_new)
if hasattr(p, 'patchlevel'):
self.assertEqual(p.patchlevel, patchlevel)
if diff:
self.assertEqual(p.get(True), diff)
if hasattr(p, 'svn_properties'):
self.assertEqual(p.svn_properties, svn_properties)
class RietveldTest(BaseFixture):
TESTED_CLASS = rietveld.Rietveld
def test_get_patch_empty(self):
self.requests = [('/api/123/456', '{}')]
patches = self.rietveld.get_patch(123, 456)
self.assertTrue(isinstance(patches, patch.PatchSet))
self.assertEqual([], patches.patches)
def test_get_patch_no_status(self):
self.requests = [
( '/api/123/456',
_api(
{
'tools/clang_check/README.chromium': {
'status': None,
'id': 789,
}})),
('/download/issue123_456_789.diff', RAW.DELETE),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'tools/clang_check/README.chromium',
RAW.DELETE,
is_delete=True)
def test_get_patch_2_files(self):
self.requests = [
('/api/123/456',
_api({'foo': _file('A'), 'file_a': _file('M', chunk_id=790)})),
('/download/issue123_456_789.diff', RAW.NEW),
('/download/issue123_456_790.diff', RAW.NEW_NOT_NULL),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(2, len(patches.patches))
self._check_patch(
patches.patches[0], 'file_a', RAW.NEW_NOT_NULL, is_new=True)
self._check_patch(patches.patches[1], 'foo', RAW.NEW, is_new=True)
def test_get_patch_add(self):
self.requests = [
('/api/123/456', _api({'foo': _file('A')})),
('/download/issue123_456_789.diff', RAW.NEW),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(patches.patches[0], 'foo', RAW.NEW, is_new=True)
def test_invalid_status(self):
self.requests = [
('/api/123/456', _api({'file_a': _file('B')})),
]
try:
self.rietveld.get_patch(123, 456)
self.fail()
except patch.UnsupportedPatchFormat, e:
self.assertEqual('file_a', e.filename)
def test_add_plus_merge(self):
# svn:mergeinfo is dropped.
properties = (
'\nAdded: svn:mergeinfo\n'
' Merged /branches/funky/file_b:r69-2775\n')
self.requests = [
('/api/123/456',
_api({'pp': _file('A+', property_changes=properties)})),
('/download/issue123_456_789.diff', GIT.COPY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'pp',
GIT.COPY,
is_git_diff=True,
is_new=True,
patchlevel=1,
source_filename='PRESUBMIT.py')
def test_add_plus_eol_style(self):
properties = '\nAdded: svn:eol-style\n + LF\n'
self.requests = [
('/api/123/456',
_api({'pp': _file('A+', property_changes=properties)})),
('/download/issue123_456_789.diff', GIT.COPY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'pp',
GIT.COPY,
is_git_diff=True,
is_new=True,
patchlevel=1,
source_filename='PRESUBMIT.py',
svn_properties=[('svn:eol-style', 'LF')])
def test_add_empty(self):
self.requests = [
('/api/123/456', _api({'__init__.py': _file('A ', num_chunks=0)})),
('/download/issue123_456_789.diff', RAW.CRAP_ONLY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'__init__.py',
RAW.CRAP_ONLY,
is_new=True)
def test_delete(self):
name = 'tools/clang_check/README.chromium'
self.requests = [
('/api/123/456', _api({name: _file('D')})),
('/download/issue123_456_789.diff', RAW.DELETE),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(patches.patches[0], name, RAW.DELETE, is_delete=True)
def test_delete_empty(self):
name = 'tests/__init__.py'
self.requests = [
('/api/123/456', _api({name: _file('D')})),
('/download/issue123_456_789.diff', GIT.DELETE_EMPTY),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
name,
GIT.DELETE_EMPTY,
is_delete=True,
is_git_diff=True,
patchlevel=1)
def test_m_plus(self):
properties = '\nAdded: svn:eol-style\n + LF\n'
self.requests = [
('/api/123/456',
_api({'chrome/file.cc': _file('M+', property_changes=properties)})),
('/download/issue123_456_789.diff', RAW.PATCH),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'chrome/file.cc',
RAW.PATCH,
svn_properties=[('svn:eol-style', 'LF')])
def test_m_plus_unknown_prop(self):
properties = '\nAdded: svn:foobar\n + stuff\n'
self.requests = [
('/api/123/456',
_api({'file_a': _file('M+', property_changes=properties)})),
]
try:
self.rietveld.get_patch(123, 456)
self.fail()
except patch.UnsupportedPatchFormat, e:
self.assertEqual('file_a', e.filename)
def test_get_patch_moved(self):
self.requests = [
('/api/123/456', _api({'file_b': _file('A+')})),
('/download/issue123_456_789.diff', RAW.MINIMAL_RENAME),
]
patches = self.rietveld.get_patch(123, 456)
self.assertEqual(1, len(patches.patches))
self._check_patch(
patches.patches[0],
'file_b',
RAW.MINIMAL_RENAME,
source_filename='file_a',
is_new=True)
def test_svn_properties(self):
# Line too long (N/80)
# pylint: disable=C0301
# To test one of these, run something like
# import json, pprint, urllib
# url = 'http://codereview.chromium.org/api/202046/1'
# pprint.pprint(json.load(urllib.urlopen(url))['files'])
# svn:mergeinfo across branches:
# http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmlcatalog_dummy.cc
self.assertEqual(
[('svn:eol-style', 'LF')],
rietveld.Rietveld.parse_svn_properties(
u'\nAdded: svn:eol-style\n + LF\n', 'foo'))
# svn:eol-style property that is lost in the diff
# http://codereview.chromium.org/202046/diff/1/third_party/libxml/xmllint_dummy.cc
self.assertEqual(
[],
rietveld.Rietveld.parse_svn_properties(
u'\nAdded: svn:mergeinfo\n'
' Merged /branches/chrome_webkit_merge_branch/third_party/'
'libxml/xmldummy_mac.cc:r69-2775\n',
'foo'))
self.assertEqual(
[],
rietveld.Rietveld.parse_svn_properties(u'', 'foo'))
# http://codereview.chromium.org/api/7834045/15001
self.assertEqual(
[('svn:executable', '*'), ('svn:eol-style', 'LF')],
rietveld.Rietveld.parse_svn_properties(
'\n'
'Added: svn:executable\n'
' + *\n'
'Added: svn:eol-style\n'
' + LF\n',
'foo'))
# http://codereview.chromium.org/api/9139006/7001
self.assertEqual(
[('svn:mime-type', 'image/png')],
rietveld.Rietveld.parse_svn_properties(
'\n'
'Added: svn:mime-type\n'
' + image/png\n',
'foo'))
def test_bad_svn_properties(self):
try:
rietveld.Rietveld.parse_svn_properties(u'\n', 'foo')
self.fail()
except rietveld.patch.UnsupportedPatchFormat, e:
self.assertEqual('foo', e.filename)
# TODO(maruel): Change with no diff, only svn property change:
# http://codereview.chromium.org/6462019/
def test_search_all_empty(self):
url = (
'/search?format=json'
'&base=base'
'&created_after=2010-01-02'
'&created_before=2010-01-01'
'&modified_after=2010-02-02'
'&modified_before=2010-02-01'
'&owner=owner%40example.com'
'&reviewer=reviewer%40example.com'
'&closed=2'
'&commit=2'
'&private=2'
'&keys_only=True'
'&with_messages=True'
'&limit=23')
self.requests = [
(url, '{}'),
]
results = list(self.rietveld.search(
'owner@example.com',
'reviewer@example.com',
'base',
True,
True,
True,
'2010-01-01',
'2010-01-02',
'2010-02-01',
'2010-02-02',
23,
True,
True,
))
self.assertEqual([], results)
def test_results_cursor(self):
# Verify cursor iteration is transparent.
self.requests = [
('/search?format=json&base=base',
rietveld.json.dumps({
'cursor': 'MY_CURSOR',
'results': [{'foo': 'bar'}, {'foo': 'baz'}],
})),
('/search?format=json&base=base&cursor=MY_CURSOR',
rietveld.json.dumps({
'cursor': 'NEXT',
'results': [{'foo': 'prout'}],
})),
('/search?format=json&base=base&cursor=NEXT',
rietveld.json.dumps({
'cursor': 'VOID',
'results': [],
})),
]
expected = [
{'foo': 'bar'},
{'foo': 'baz'},
{'foo': 'prout'},
]
for i in self.rietveld.search(base='base'):
self.assertEqual(expected.pop(0), i)
self.assertEqual([], expected)
class CachingRietveldTest(BaseFixture):
# Tests only one request is done.
TESTED_CLASS = rietveld.CachingRietveld
def test_get_description(self):
self.requests = [
('/1/description', 'Blah blah blah'),
]
expected = 'Blah blah blah'
self.assertEqual(expected, self.rietveld.get_description(1))
self.assertEqual(expected, self.rietveld.get_description(1))
def test_get_issue_properties(self):
self.requests = [
('/api/1?messages=true', rietveld.json.dumps({'messages': 'foo'})),
]
expected = {}
expected_msg = {'messages': 'foo'}
self.assertEqual(expected, self.rietveld.get_issue_properties(1, False))
self.assertEqual(expected_msg, self.rietveld.get_issue_properties(1, True))
def test_get_patchset_properties(self):
self.requests = [
('/api/1/2', '{}'),
]
expected = {}
self.assertEqual(expected, self.rietveld.get_patchset_properties(1, 2))
self.assertEqual(expected, self.rietveld.get_patchset_properties(1, 2))
if __name__ == '__main__':
logging.basicConfig(level=[
logging.ERROR, logging.INFO, logging.DEBUG][min(2, sys.argv.count('-v'))])
unittest.main()
| |
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""HP LeftHand SAN ISCSI REST Proxy."""
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.driver import ISCSIDriver
from cinder.volume import volume_types
from oslo.config import cfg
LOG = logging.getLogger(__name__)
try:
import hplefthandclient
from hplefthandclient import client
from hplefthandclient import exceptions as hpexceptions
except ImportError:
LOG.error(_('Module hplefthandclient not installed.'))
hplefthand_opts = [
cfg.StrOpt('hplefthand_api_url',
default=None,
help="HP LeftHand WSAPI Server Url like "
"https://<LeftHand ip>:8081/lhos"),
cfg.StrOpt('hplefthand_username',
default=None,
help="HP LeftHand Super user username"),
cfg.StrOpt('hplefthand_password',
default=None,
help="HP LeftHand Super user password",
secret=True),
cfg.StrOpt('hplefthand_clustername',
default=None,
help="HP LeftHand cluster name"),
cfg.BoolOpt('hplefthand_iscsi_chap_enabled',
default=False,
help='Configure CHAP authentication for iSCSI connections '
'(Default: Disabled)'),
cfg.BoolOpt('hplefthand_debug',
default=False,
help="Enable HTTP debugging to LeftHand"),
]
CONF = cfg.CONF
CONF.register_opts(hplefthand_opts)
# map the extra spec key to the REST client option key
extra_specs_key_map = {
'hplh:provisioning': 'isThinProvisioned',
'hplh:ao': 'isAdaptiveOptimizationEnabled',
'hplh:data_pl': 'dataProtectionLevel',
}
# map the extra spec value to the REST client option value
extra_specs_value_map = {
'isThinProvisioned': {'thin': True, 'full': False},
'isAdaptiveOptimizationEnabled': {'true': True, 'false': False},
'dataProtectionLevel': {
'r-0': 0, 'r-5': 1, 'r-10-2': 2, 'r-10-3': 3, 'r-10-4': 4, 'r-6': 5}
}
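# Illustration (added comment, spec values hypothetical): a volume type with
# extra specs {'hplh:provisioning': 'thin', 'hplh:data_pl': 'r-10-2'} maps,
# via the two dictionaries above, to the REST client options
# {'isThinProvisioned': True, 'dataProtectionLevel': 2}.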
class HPLeftHandRESTProxy(ISCSIDriver):
"""Executes REST commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:
1.0.0 - Initial REST iSCSI proxy
1.0.1 - Added support for retype
1.0.2 - Added support for volume migrate
1.0.3 - Fixed bug #1285829, HP LeftHand backend assisted migration
should check for snapshots
1.0.4 - Fixed bug #1285925, LeftHand AO volume create performance
improvement
"""
VERSION = "1.0.4"
device_stats = {}
def __init__(self, *args, **kwargs):
super(HPLeftHandRESTProxy, self).__init__(*args, **kwargs)
self.configuration.append_config_values(hplefthand_opts)
if not self.configuration.hplefthand_api_url:
raise exception.NotFound(_("HPLeftHand url not found"))
# blank is the only invalid character for cluster names
# so we need to use it as a separator
self.DRIVER_LOCATION = self.__class__.__name__ + ' %(cluster)s %(vip)s'
def do_setup(self, context):
"""Set up LeftHand client."""
try:
self.client = client.HPLeftHandClient(
self.configuration.hplefthand_api_url)
self.client.login(
self.configuration.hplefthand_username,
self.configuration.hplefthand_password)
if self.configuration.hplefthand_debug:
self.client.debug_rest(True)
cluster_info = self.client.getClusterByName(
self.configuration.hplefthand_clustername)
self.cluster_id = cluster_info['id']
virtual_ips = cluster_info['virtualIPAddresses']
self.cluster_vip = virtual_ips[0]['ipV4Address']
self._update_backend_status()
except hpexceptions.HTTPNotFound:
raise exception.DriverNotInitialized(
_('LeftHand cluster not found'))
except Exception as ex:
raise exception.DriverNotInitialized(ex)
def check_for_setup_error(self):
pass
def get_version_string(self):
return (_('REST %(proxy_ver)s hplefthandclient %(rest_ver)s') % {
'proxy_ver': self.VERSION,
'rest_ver': hplefthandclient.get_version_string()})
def create_volume(self, volume):
"""Creates a volume."""
try:
# get the extra specs of interest from this volume's volume type
volume_extra_specs = self._get_volume_extra_specs(volume)
extra_specs = self._get_lh_extra_specs(
volume_extra_specs,
extra_specs_key_map.keys())
# map the extra specs key/value pairs to key/value pairs
# used as optional configuration values by the LeftHand backend
optional = self._map_extra_specs(extra_specs)
# if provisioning is not set, default to thin
if 'isThinProvisioned' not in optional:
optional['isThinProvisioned'] = True
# AdaptiveOptimization defaults to 'true' if you don't specify the
# value on a create, and that is the most efficient way to create
# a volume. If you pass in 'false' or 'true' for AO, it will result
# in an update operation following the create operation to set this
# value, so it is best to not specify the value and let it default
# to 'true'.
if optional.get('isAdaptiveOptimizationEnabled'):
del optional['isAdaptiveOptimizationEnabled']
clusterName = self.configuration.hplefthand_clustername
optional['clusterName'] = clusterName
volume_info = self.client.createVolume(
volume['name'], self.cluster_id,
volume['size'] * units.Gi,
optional)
return self._update_provider(volume_info)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def delete_volume(self, volume):
"""Deletes a volume."""
try:
volume_info = self.client.getVolumeByName(volume['name'])
self.client.deleteVolume(volume_info['id'])
except hpexceptions.HTTPNotFound:
LOG.error(_("Volume did not exist. It will not be deleted"))
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def extend_volume(self, volume, new_size):
"""Extend the size of an existing volume."""
try:
volume_info = self.client.getVolumeByName(volume['name'])
# convert GB to bytes
options = {'size': int(new_size) * units.Gi}
self.client.modifyVolume(volume_info['id'], options)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
try:
volume_info = self.client.getVolumeByName(snapshot['volume_name'])
option = {'inheritAccess': True}
self.client.createSnapshot(snapshot['name'],
volume_info['id'],
option)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
snap_info = self.client.getSnapshotByName(snapshot['name'])
self.client.deleteSnapshot(snap_info['id'])
except hpexceptions.HTTPNotFound:
LOG.error(_("Snapshot did not exist. It will not be deleted"))
except hpexceptions.HTTPServerError as ex:
in_use_msg = 'cannot be deleted because it is a clone point'
if in_use_msg in ex.get_description():
raise exception.SnapshotIsBusy(ex)
raise exception.VolumeBackendAPIException(ex)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def get_volume_stats(self, refresh):
"""Gets volume stats."""
if refresh:
self._update_backend_status()
return self.device_stats
def _update_backend_status(self):
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['reserved_percentage'] = 0
data['storage_protocol'] = 'iSCSI'
data['vendor_name'] = 'Hewlett-Packard'
data['location_info'] = (self.DRIVER_LOCATION % {
'cluster': self.configuration.hplefthand_clustername,
'vip': self.cluster_vip})
cluster_info = self.client.getCluster(self.cluster_id)
total_capacity = cluster_info['spaceTotal']
free_capacity = cluster_info['spaceAvailable']
# convert to GB
data['total_capacity_gb'] = int(total_capacity) / units.Gi
data['free_capacity_gb'] = int(free_capacity) / units.Gi
self.device_stats = data
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host. HP VSA requires a volume to be assigned
to a server.
"""
try:
server_info = self._create_server(connector)
volume_info = self.client.getVolumeByName(volume['name'])
self.client.addServerAccess(volume_info['id'], server_info['id'])
iscsi_properties = self._get_iscsi_properties(volume)
if ('chapAuthenticationRequired' in server_info
and server_info['chapAuthenticationRequired']):
iscsi_properties['auth_method'] = 'CHAP'
iscsi_properties['auth_username'] = connector['initiator']
iscsi_properties['auth_password'] = (
server_info['chapTargetSecret'])
return {'driver_volume_type': 'iscsi', 'data': iscsi_properties}
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def terminate_connection(self, volume, connector, **kwargs):
"""Unassign the volume from the host."""
try:
volume_info = self.client.getVolumeByName(volume['name'])
server_info = self.client.getServerByName(connector['host'])
self.client.removeServerAccess(
volume_info['id'],
server_info['id'])
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
try:
snap_info = self.client.getSnapshotByName(snapshot['name'])
volume_info = self.client.cloneSnapshot(
volume['name'],
snap_info['id'])
return self._update_provider(volume_info)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def create_cloned_volume(self, volume, src_vref):
try:
volume_info = self.client.getVolumeByName(src_vref['name'])
self.client.cloneVolume(volume['name'], volume_info['id'])
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
def _get_volume_extra_specs(self, volume):
"""Get extra specs from a volume."""
extra_specs = {}
type_id = volume.get('volume_type_id', None)
if type_id is not None:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
extra_specs = volume_type.get('extra_specs')
return extra_specs
def _get_lh_extra_specs(self, extra_specs, valid_keys):
"""Get LeftHand extra_specs (valid_keys only)."""
extra_specs_of_interest = {}
for key, value in extra_specs.iteritems():
if key in valid_keys:
extra_specs_of_interest[key] = value
return extra_specs_of_interest
def _map_extra_specs(self, extra_specs):
"""Map the extra spec key/values to LeftHand key/values."""
client_options = {}
for key, value in extra_specs.iteritems():
# map extra spec key to lh client option key
client_key = extra_specs_key_map[key]
            # map extra spec value to lh client option value
try:
value_map = extra_specs_value_map[client_key]
# an invalid value will throw KeyError
client_value = value_map[value]
client_options[client_key] = client_value
except KeyError:
LOG.error(_("'%(value)s' is an invalid value "
"for extra spec '%(key)s'") %
{'value': value, 'key': key})
return client_options
def _update_provider(self, volume_info):
# TODO(justinsb): Is this always 1? Does it matter?
cluster_interface = '1'
iscsi_portal = self.cluster_vip + ":3260," + cluster_interface
return {'provider_location': (
"%s %s %s" % (iscsi_portal, volume_info['iscsiIqn'], 0))}
def _create_server(self, connector):
server_info = None
chap_enabled = self.configuration.hplefthand_iscsi_chap_enabled
try:
server_info = self.client.getServerByName(connector['host'])
chap_secret = server_info['chapTargetSecret']
if not chap_enabled and chap_secret:
LOG.warning(_('CHAP secret exists for host %s but CHAP is '
'disabled') % connector['host'])
if chap_enabled and chap_secret is None:
LOG.warning(_('CHAP is enabled, but server secret not '
'configured on server %s') % connector['host'])
return server_info
except hpexceptions.HTTPNotFound:
# server does not exist, so create one
pass
optional = None
if chap_enabled:
chap_secret = utils.generate_password()
optional = {'chapName': connector['initiator'],
'chapTargetSecret': chap_secret,
'chapAuthenticationRequired': True
}
server_info = self.client.createServer(connector['host'],
connector['initiator'],
optional)
return server_info
def create_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
try:
volume_info = self.client.getVolumeByName(volume['name'])
except hpexceptions.HTTPNotFound:
raise exception.VolumeNotFound(volume_id=volume['id'])
try:
# pick out the LH extra specs
new_extra_specs = dict(new_type).get('extra_specs')
lh_extra_specs = self._get_lh_extra_specs(
new_extra_specs,
extra_specs_key_map.keys())
LOG.debug('LH specs=%(specs)s' % {'specs': lh_extra_specs})
# only set the ones that have changed
changed_extra_specs = {}
for key, value in lh_extra_specs.iteritems():
(old, new) = diff['extra_specs'][key]
if old != new:
changed_extra_specs[key] = value
# map extra specs to LeftHand options
options = self._map_extra_specs(changed_extra_specs)
if len(options) > 0:
self.client.modifyVolume(volume_info['id'], options)
return True
except Exception as ex:
LOG.warning("%s" % ex)
return False
def migrate_volume(self, ctxt, volume, host):
"""Migrate the volume to the specified host.
        Backend assisted volume migration will occur if and only if:
        1. The volume is on the same LeftHand backend
        2. The volume is not attached
        3. The volume has no snapshots
        4. The source and destination clusters are in the same management group
Volume re-type is not supported.
Returns a boolean indicating whether the migration occurred, as well as
model_update.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'cluster=%(cluster)s' % {
'id': volume['id'],
'host': host,
'cluster': self.configuration.hplefthand_clustername})
false_ret = (False, None)
if 'location_info' not in host['capabilities']:
return false_ret
host_location = host['capabilities']['location_info']
(driver, cluster, vip) = host_location.split(' ')
try:
# get the cluster info, if it exists and compare
cluster_info = self.client.getClusterByName(cluster)
            LOG.debug('Cluster info: %s' % cluster_info)
virtual_ips = cluster_info['virtualIPAddresses']
if driver != self.__class__.__name__:
LOG.info(_("Cannot provide backend assisted migration for "
"volume: %s because volume is from a different "
"backend.") % volume['name'])
return false_ret
if vip != virtual_ips[0]['ipV4Address']:
LOG.info(_("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different "
"management group.") % volume['name'])
return false_ret
except hpexceptions.HTTPNotFound:
LOG.info(_("Cannot provide backend assisted migration for "
"volume: %s because cluster exists in different "
"management group.") % volume['name'])
return false_ret
try:
volume_info = self.client.getVolumeByName(volume['name'])
LOG.debug('Volume info: %s' % volume_info)
# can't migrate if server is attached
if volume_info['iscsiSessions'] is not None:
LOG.info(_("Cannot provide backend assisted migration "
"for volume: %s because the volume has been "
"exported.") % volume['name'])
return false_ret
# can't migrate if volume has snapshots
snap_info = self.client.getVolume(
volume_info['id'],
'fields=snapshots,snapshots[resource[members[name]]]')
LOG.debug('Snapshot info: %s' % snap_info)
if snap_info['snapshots']['resource'] is not None:
LOG.info(_("Cannot provide backend assisted migration "
"for volume: %s because the volume has "
"snapshots.") % volume['name'])
return false_ret
options = {'clusterName': cluster}
self.client.modifyVolume(volume_info['id'], options)
except hpexceptions.HTTPNotFound:
LOG.info(_("Cannot provide backend assisted migration for "
"volume: %s because volume does not exist in this "
"management group.") % volume['name'])
return false_ret
except hpexceptions.HTTPServerError as ex:
LOG.error(ex)
return false_ret
return (True, None)
| |
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the size of each given file and optionally computes the size of
libchrome.so without the dependencies added for building with android NDK.
Also breaks down the contents of the APK to determine the installed size
and assign size contributions to different classes of file.
"""
import collections
import json
import operator
import optparse
import os
import re
import sys
import tempfile
import zipfile
import zlib
import devil_chromium
from devil.utils import cmd_helper
from pylib.constants import host_paths
_GRIT_PATH = os.path.join(host_paths.DIR_SOURCE_ROOT, 'tools', 'grit')
with host_paths.SysPath(_GRIT_PATH):
from grit.format import data_pack # pylint: disable=import-error
with host_paths.SysPath(host_paths.BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
# Static initializers expected in official builds. Note that this list is built
# using 'nm' on libchrome.so which results from a GCC official build (i.e.
# Clang is not supported currently).
STATIC_INITIALIZER_SYMBOL_PREFIX = '_GLOBAL__I_'
EXPECTED_STATIC_INITIALIZERS = frozenset([
'allocators.cpp',
'common.pb.cc',
'defaults.cc',
'generated_message_util.cc',
'locale_impl.cpp',
'timeutils.cc',
'watchdog.cc',
# http://b/6354040
'SkFontHost_android.cpp',
# http://b/6354040
'isolate.cc',
'assembler_arm.cc',
'isolate.cc',
])
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'APK resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_RC_HEADER_RE = re.compile(r'^#define (?P<name>\w+) (?P<id>\d+)$')
def GetStaticInitializers(so_path):
"""Returns a list of static initializers found in the non-stripped library
located at the provided path. Note that this function assumes that the
library was compiled with GCC.
"""
output = cmd_helper.GetCmdOutput(['nm', so_path])
static_initializers = []
  for line in output.splitlines():
symbol_name = line.split(' ').pop().rstrip()
if STATIC_INITIALIZER_SYMBOL_PREFIX in symbol_name:
static_initializers.append(
symbol_name.replace(STATIC_INITIALIZER_SYMBOL_PREFIX, ''))
return static_initializers
def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
improvement_direction='down', important=True):
"""Outputs test results in correct format.
If chart_data is None, it outputs data in old format. If chart_data is a
dictionary, formats in chartjson format. If any other format defaults to
old format.
"""
if chart_data and isinstance(chart_data, dict):
chart_data['charts'].setdefault(graph_title, {})
chart_data['charts'][graph_title][trace_title] = {
'type': 'scalar',
'value': value,
'units': units,
'improvement_direction': improvement_direction,
'important': important
}
else:
perf_tests_results_helper.PrintPerfResult(
graph_title, trace_title, [value], units)
def PrintResourceSizes(files, chartjson=None):
"""Prints the sizes of each given file.
Args:
files: List of files to print sizes for.
"""
for f in files:
ReportPerfResult(chartjson, 'ResourceSizes', os.path.basename(f) + ' size',
os.path.getsize(f), 'bytes')
def PrintApkAnalysis(apk_filename, chartjson=None):
"""Analyse APK to determine size contributions of different file classes."""
# Define a named tuple type for file grouping.
# name: Human readable name for this file group
# regex: Regular expression to match filename
# extracted: Function that takes a file name and returns whether the file is
# extracted from the apk at install/runtime.
FileGroup = collections.namedtuple('FileGroup',
['name', 'regex', 'extracted'])
# File groups are checked in sequence, so more specific regexes should be
# earlier in the list.
YES = lambda _: True
NO = lambda _: False
FILE_GROUPS = (
FileGroup('Native code', r'\.so$', lambda f: 'crazy' not in f),
FileGroup('Java code', r'\.dex$', YES),
FileGroup('Native resources (no l10n)', r'\.pak$', NO),
# For locale paks, assume only english paks are extracted.
FileGroup('Native resources (l10n)', r'\.lpak$', lambda f: 'en_' in f),
FileGroup('ICU (i18n library) data', r'assets/icudtl\.dat$', NO),
FileGroup('V8 Snapshots', r'\.bin$', NO),
FileGroup('PNG drawables', r'\.png$', NO),
FileGroup('Non-compiled Android resources', r'^res/', NO),
FileGroup('Compiled Android resources', r'\.arsc$', NO),
FileGroup('Package metadata', r'^(META-INF/|AndroidManifest\.xml$)', NO),
FileGroup('Unknown files', r'.', NO),
)
apk = zipfile.ZipFile(apk_filename, 'r')
try:
apk_contents = apk.infolist()
finally:
apk.close()
total_apk_size = os.path.getsize(apk_filename)
apk_basename = os.path.basename(apk_filename)
found_files = {}
for group in FILE_GROUPS:
found_files[group] = []
for member in apk_contents:
for group in FILE_GROUPS:
if re.search(group.regex, member.filename):
found_files[group].append(member)
break
else:
raise KeyError('No group found for file "%s"' % member.filename)
total_install_size = total_apk_size
for group in FILE_GROUPS:
apk_size = sum(member.compress_size for member in found_files[group])
install_size = apk_size
install_bytes = sum(f.file_size for f in found_files[group]
if group.extracted(f.filename))
install_size += install_bytes
total_install_size += install_bytes
ReportPerfResult(chartjson, apk_basename + '_Breakdown',
group.name + ' size', apk_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_InstallBreakdown',
group.name + ' size', install_size, 'bytes')
transfer_size = _CalculateCompressedSize(apk_filename)
ReportPerfResult(chartjson, apk_basename + '_InstallSize',
'Estimated installed size', total_install_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_InstallSize', 'APK size',
total_apk_size, 'bytes')
ReportPerfResult(chartjson, apk_basename + '_TransferSize',
'Transfer size (deflate)', transfer_size, 'bytes')
def IsPakFileName(file_name):
"""Returns whether the given file name ends with .pak or .lpak."""
return file_name.endswith('.pak') or file_name.endswith('.lpak')
def PrintPakAnalysis(apk_filename, min_pak_resource_size, build_type):
"""Print sizes of all resources in all pak files in |apk_filename|."""
print
print 'Analyzing pak files in %s...' % apk_filename
# A structure for holding details about a pak file.
Pak = collections.namedtuple(
'Pak', ['filename', 'compress_size', 'file_size', 'resources'])
  # Build a list of Pak objects for each pak file.
paks = []
apk = zipfile.ZipFile(apk_filename, 'r')
try:
for i in (x for x in apk.infolist() if IsPakFileName(x.filename)):
with tempfile.NamedTemporaryFile() as f:
f.write(apk.read(i.filename))
f.flush()
paks.append(Pak(i.filename, i.compress_size, i.file_size,
data_pack.DataPack.ReadDataPack(f.name).resources))
finally:
apk.close()
# Output the overall pak file summary.
total_files = len(paks)
total_compress_size = sum(pak.compress_size for pak in paks)
total_file_size = sum(pak.file_size for pak in paks)
print 'Total pak files: %d' % total_files
print 'Total compressed size: %s' % _FormatBytes(total_compress_size)
print 'Total uncompressed size: %s' % _FormatBytes(total_file_size)
print
# Output the table of details about all pak files.
print '%25s%11s%21s%21s' % (
'FILENAME', 'RESOURCES', 'COMPRESSED SIZE', 'UNCOMPRESSED SIZE')
for pak in sorted(paks, key=operator.attrgetter('file_size'), reverse=True):
print '%25s %10s %12s %6.2f%% %12s %6.2f%%' % (
pak.filename,
len(pak.resources),
_FormatBytes(pak.compress_size),
100.0 * pak.compress_size / total_compress_size,
_FormatBytes(pak.file_size),
100.0 * pak.file_size / total_file_size)
print
print 'Analyzing pak resources in %s...' % apk_filename
# Calculate aggregate stats about resources across pak files.
resource_count_map = collections.defaultdict(int)
resource_size_map = collections.defaultdict(int)
resource_overhead_bytes = 6
for pak in paks:
for r in pak.resources:
resource_count_map[r] += 1
resource_size_map[r] += len(pak.resources[r]) + resource_overhead_bytes
# Output the overall resource summary.
total_resource_size = sum(resource_size_map.values())
total_resource_count = len(resource_count_map)
assert total_resource_size <= total_file_size
print 'Total pak resources: %s' % total_resource_count
print 'Total uncompressed resource size: %s' % _FormatBytes(
total_resource_size)
print
resource_id_name_map = _GetResourceIdNameMap(build_type)
# Output the table of details about all resources across pak files.
print
print '%56s %5s %17s' % ('RESOURCE', 'COUNT', 'UNCOMPRESSED SIZE')
for i in sorted(resource_size_map, key=resource_size_map.get,
reverse=True):
if resource_size_map[i] >= min_pak_resource_size:
print '%56s %5s %9s %6.2f%%' % (
resource_id_name_map.get(i, i),
resource_count_map[i],
_FormatBytes(resource_size_map[i]),
100.0 * resource_size_map[i] / total_resource_size)
def _GetResourceIdNameMap(build_type):
"""Returns a map of {resource_id: resource_name}."""
out_dir = os.path.join(host_paths.DIR_SOURCE_ROOT, 'out', build_type)
assert os.path.isdir(out_dir), 'Failed to locate out dir at %s' % out_dir
print 'Looking at resources in: %s' % out_dir
grit_headers = []
for root, _, files in os.walk(out_dir):
if root.endswith('grit'):
grit_headers += [os.path.join(root, f) for f in files if f.endswith('.h')]
assert grit_headers, 'Failed to find grit headers in %s' % out_dir
id_name_map = {}
for header in grit_headers:
with open(header, 'r') as f:
for line in f.readlines():
m = _RC_HEADER_RE.match(line.strip())
if m:
i = int(m.group('id'))
name = m.group('name')
if i in id_name_map and name != id_name_map[i]:
print 'WARNING: Resource ID conflict %s (%s vs %s)' % (
i, id_name_map[i], name)
id_name_map[i] = name
return id_name_map
def PrintStaticInitializersCount(so_with_symbols_path, chartjson=None):
"""Emits the performance result for static initializers found in the provided
shared library. Additionally, files for which static initializers were
found are printed on the standard output.
Args:
so_with_symbols_path: Path to the unstripped libchrome.so file.
"""
print 'Files with static initializers:'
static_initializers = GetStaticInitializers(so_with_symbols_path)
print '\n'.join(static_initializers)
ReportPerfResult(chartjson, 'StaticInitializersCount', 'count',
len(static_initializers), 'count')
def _FormatBytes(byts):
"""Pretty-print a number of bytes."""
if byts > 2**20.0:
byts /= 2**20.0
return '%.2fm' % byts
if byts > 2**10.0:
byts /= 2**10.0
return '%.2fk' % byts
return str(byts)
def _CalculateCompressedSize(file_path):
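  """Returns the zlib-compressed size of file_path in bytes without writing output."""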
CHUNK_SIZE = 256 * 1024
compressor = zlib.compressobj()
total_size = 0
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(CHUNK_SIZE), ''):
total_size += len(compressor.compress(chunk))
total_size += len(compressor.flush())
return total_size
def main(argv):
usage = """Usage: %prog [options] file1 file2 ...
Pass any number of files to graph their sizes. Any files with the extension
'.apk' will be broken down into their components on a separate graph."""
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option('--so-path', help='Path to libchrome.so.')
option_parser.add_option('--so-with-symbols-path',
help='Path to libchrome.so with symbols.')
option_parser.add_option('--min-pak-resource-size', type='int',
default=20*1024,
help='Minimum byte size of displayed pak resources.')
option_parser.add_option('--build_type', dest='build_type', default='Debug',
help='Sets the build type, default is Debug.')
option_parser.add_option('--chartjson', action="store_true",
help='Sets output mode to chartjson.')
option_parser.add_option('--output-dir', default='.',
help='Directory to save chartjson to.')
option_parser.add_option('-d', '--device',
help='Dummy option for perf runner.')
options, args = option_parser.parse_args(argv)
files = args[1:]
chartjson = _BASE_CHART.copy() if options.chartjson else None
  # For backward compatibility with buildbot scripts, treat --so-path as just
# another file to print the size of. We don't need it for anything special any
# more.
if options.so_path:
files.append(options.so_path)
if not files:
option_parser.error('Must specify a file')
devil_chromium.Initialize()
if options.so_with_symbols_path:
PrintStaticInitializersCount(
options.so_with_symbols_path, chartjson=chartjson)
PrintResourceSizes(files, chartjson=chartjson)
for f in files:
if f.endswith('.apk'):
PrintApkAnalysis(f, chartjson=chartjson)
PrintPakAnalysis(f, options.min_pak_resource_size, options.build_type)
if chartjson:
    results_path = os.path.join(options.output_dir, 'results-chart.json')
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| |
from datetime import datetime, timedelta
from logging import getLogger
from collections import defaultdict
from sqlalchemy.orm import contains_eager, joinedload
from sqlalchemy.sql import func
from flask import Blueprint
from flask.templating import render_template
from flask.globals import g, request
from overc import __version__
from overc.lib.db import models
from overc.lib.flask.json import jsonapi
bp = Blueprint('ui', __name__, url_prefix='/ui', template_folder='templates',
static_folder='static', static_url_path='/static'
)
logger = getLogger(__name__)
#region UI
@bp.route('/', methods=['GET'])
def index():
""" Index page """
return render_template('pages/index.htm', overc_version=__version__)
#endregion
#region API
@bp.route('/api/status/')
@bp.route('/api/status/server/<int:server_id>')
@bp.route('/api/status/service/<int:service_id>')
@jsonapi
def api_status(server_id=None, service_id=None):
""" Get all available information """
ssn = g.db
# Filter servers
servers = ssn.query(models.Server) \
.join(models.Server.services) \
.options(contains_eager(models.Server.services)) \
.filter(
models.Server.id == server_id if server_id else True,
models.Service.id == service_id if service_id else True
) \
.all()
# Count alerts for 24h
alert_counts = ssn.query(
models.Alert.server_id,
models.Alert.service_id,
func.count(models.Alert)
) \
.filter(
models.Alert.ctime >= (datetime.utcnow() - timedelta(hours=24)),
models.Alert.server_id == server_id if server_id else True,
models.Alert.service_id == service_id if service_id else True
) \
.group_by(models.Alert.server_id, models.Alert.service_id) \
.all()
server_alerts = defaultdict(lambda: 0)
service_alerts = defaultdict(lambda: 0)
total_alerts = 0
for (srv_id, svc_id, n) in alert_counts:
total_alerts += n
if svc_id:
service_alerts[svc_id] += n
elif srv_id:
server_alerts[srv_id] += n
    # Test whether there are any service states not yet checked, which probably means the supervisor thread is not running
last_checked = ssn.query(func.min(models.ServiceState.rtime)) \
.filter(
models.ServiceState.checked == False,
) \
.scalar()
supervisor_lag = (datetime.utcnow() - last_checked).total_seconds() if last_checked else 0.0
# Last state id
last_state_id = ssn.query(func.max(models.ServiceState.id)) \
.filter(models.ServiceState.service_id == service_id if service_id else True) \
.scalar()
# Format
return {
# Statistics
'stats': {
'n_alerts': total_alerts, # alerts today (for all selected servers)
'last_state_id': last_state_id, # Last ServiceState.id
'supervisor_lag': supervisor_lag, # Seconds ago the supervisor process last checked something
},
# Servers & Services
'servers': sorted([
{
'id': server.id,
'name': server.name,
'title': server.title,
'ip': server.ip,
'n_alerts': server_alerts[server.id], # alerts today, for this server
'services': sorted([
{
'id': service.id,
'period': service.period,
'name': service.name,
'title': service.title,
'n_alerts': service_alerts[service.id], # alerts today, for this service
'state': {
'rtime': service.state.rtime.isoformat(sep=' '),
'timed_out': service.timed_out,
'seen_ago': str(datetime.utcnow() - service.state.rtime).split('.')[0],
'state': service.state.state,
'info': service.state.info,
} if service.state else None
} for service in server.services
], cmp, lambda s: s['name'])
} for server in servers
], cmp, lambda s: s['name'])
}
@bp.route('/api/status/service/<int:service_id>/states')
@jsonapi
def api_status_service_states(service_id):
""" Service states for 24h """
ssn = g.db
dtime = timedelta(hours=float(request.args.get('hours', default=24)))
# Load states & alerts
states = ssn.query(models.ServiceState) \
.options(joinedload(models.ServiceState.alerts)) \
.filter(
models.ServiceState.rtime >= (datetime.utcnow() - dtime),
models.ServiceState.service_id == service_id
) \
.order_by(models.ServiceState.id.desc()) \
.all()
# Collapse
groups = request.args.get('groups', default=False)
if groups:
# Go through states and detect sequences of states with no changes: these are replaced with Groups
# A "change": state change or alerts
#: List of groups to expand: [ (id1, id2), ... ]
expand = request.args.getlist('expand', lambda v: map(int, v.split('-'))) if request.args.has_key('expand') else ()
# Detect groups
prev_state = None
cur_group = None
groups = []
for i, s in enumerate(states):
# Init group
if cur_group is None:
cur_group = [i, None]
# Detect changes
has_changes = s.state != prev_state
has_changes |= len(s.alerts)
if has_changes:
# Put group
groups.append(cur_group)
# Unset group
cur_group = None
else:
# Store id
cur_group[1] = i
# Memo
prev_state = s.state
groups.append(cur_group)
# Filter groups
groups = [ (grp[0], grp[1]-1) # Always include both border items
for grp in groups if
grp is not None and # Ignore: empty groups
grp[1] is not None and # Ignore: incomplete groups
(grp[1] - grp[0]) > 1 # Ignore: small groups of 0,1 items
]
# Replace groups
for grp in reversed(groups):
ss = (states[grp[1]], states[grp[0]])
ss_ids = (ss[0].id, ss[1].id)
# Skip expanded groups
if any(ss_ids[0] <= e[0] <= ss_ids[1] or ss_ids[0] <= e[1] <= ss_ids[1] for e in expand):
continue
# Replace with group
states[grp[0] : grp[1]+1] = [ {
'id': ss[0].id, # Just for Angular
'state': ss[0].state,
'group': '-'.join(map(str, ss_ids)),
'group_count': grp[1] - grp[0] + 1
} ]
# Format
return {
'states': [
{
'id': state.id,
'rtime': state.rtime.isoformat(sep=' '),
'state': state.state,
'info': state.info,
'alerts': [ {
'id': alert.id,
'channel': alert.channel,
'event': alert.event,
'message': alert.message,
'severity': models.state_t.states[alert.severity]
} for alert in state.alerts ],
'service': unicode(state.service),
'service_id': state.service_id,
} if isinstance(state, models.ServiceState) else state # Groups :)
for state in states
]
}
@bp.route('/api/status/alerts/')
@bp.route('/api/status/alerts/server/<int:server_id>')
@bp.route('/api/status/alerts/service/<int:service_id>')
@jsonapi
def api_status_alerts(server_id=None, service_id=None):
""" Alerts for 24h """
ssn = g.db
dtime = timedelta(hours=float(request.args.get('hours', default=24)))
# Load alerts
alerts = ssn.query(models.Alert) \
.filter(
models.Alert.ctime >= (datetime.utcnow() - dtime),
models.Alert.server_id == server_id if server_id else True,
models.Alert.service_id == service_id if service_id else True
) \
.order_by(models.Alert.id.desc()) \
.all()
# Format
return {
'alerts': [
{
'id': alert.id,
'server': unicode(alert.server) if alert.server else None,
'server_id': alert.server_id,
'service': unicode(alert.service) if alert.service else None,
'service_id': alert.service_id,
'ctime': alert.ctime.isoformat(sep=' '),
'channel': alert.channel,
'event': alert.event,
'message': alert.message,
'state_info': alert.service_state.info if alert.service_state else None
}
for alert in alerts
]
}
#region Items
@bp.route('/api/item/server/<int:server_id>', methods=['DELETE'])
@jsonapi
def api_server_delete(server_id):
""" Server CRUD: Delete """
ssn = g.db
server = ssn.query(models.Server).get(server_id)
ssn.delete(server)
ssn.commit()
return {'ok': 1}
@bp.route('/api/item/service/<int:service_id>', methods=['DELETE'])
@jsonapi
def api_service_delete(service_id):
""" Service CRUD: Delete """
ssn = g.db
service = ssn.query(models.Service).get(service_id)
ssn.delete(service)
ssn.commit()
return {'ok': 1}
#endregion
# TODO: API to rename servers, services
# TODO: API to test alert plugins
#endregion
| |
#------------------------------------------------------------------------------
# Copyright (C) 2007 Richard W. Lincoln
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" Defines an ellipse component.
References:
Jose.R.Fonseca, 'XDot', http://code.google.com/p/jrfonseca/wiki/XDot
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from math import pi, sqrt
from enthought.traits.api import Instance, Float, Int, Bool, on_trait_change
from enthought.traits.ui.api import View, Item, Group
from enthought.enable.api import Component, Pointer
from enthought.kiva import FILL_STROKE
from pen import Pen
#------------------------------------------------------------------------------
# "Ellipse" class:
#------------------------------------------------------------------------------
class Ellipse(Component):
""" Component with Ellipse traits """
#--------------------------------------------------------------------------
# "Ellipse" interface:
#--------------------------------------------------------------------------
# Pen used to draw the ellipse
pen = Instance(Pen, desc="Pen instance with which to draw the ellipse")
# X-axis coordinate of ellipse origin
x_origin = Float(desc="x-axis coordinate of ellipse origin")
# Y-axis coordinate of ellipse origin
y_origin = Float(desc="y-axis coordinate of ellipse origin")
    # Width of the ellipse (horizontal semi-axis)
    e_width = Float(desc="Ellipse width")
    # Height of the ellipse (vertical semi-axis)
    e_height = Float(desc="Ellipse height")
# Is the ellipse filled?
filled = Bool(False, desc="Fill the ellipse")
# The background color of this component.
bgcolor = "transparent" #"fuchsia"
#--------------------------------------------------------------------------
# Views:
#--------------------------------------------------------------------------
traits_view = View(
Group(
Item("pen", style="custom", show_label=False),
label="Pen", show_border=True
),
Item("x_origin"), Item("y_origin"),
Item("e_width", label="Width"),
Item("e_height", label="Height"),
Item("filled")
)
#--------------------------------------------------------------------------
# Draw component on the graphics context:
#--------------------------------------------------------------------------
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
""" Draws the component """
x_origin = self.x_origin
y_origin = self.y_origin
gc.save_state()
try:
# self._draw_bounds(gc)
gc.begin_path()
gc.translate_ctm(x_origin, y_origin)
gc.scale_ctm(self.e_width, self.e_height)
gc.arc(0.0, 0.0, 1.0, 0, 2.0*pi)
gc.close_path()
# Draw stroke at same scale as graphics context
# ctm = gc.get_ctm()
# if hasattr(ctm, "__len__") and len(ctm) == 6:
# scale = sqrt( (ctm[0]+ctm[1]) * (ctm[0]+ctm[1]) / 2.0 + \
# (ctm[2]+ctm[3]) * (ctm[2]+ctm[3]) / 2.0 )
# elif hasattr(gc, "get_ctm_scale"):
# scale = gc.get_ctm_scale()
# else:
# raise RuntimeError("Unable to get scale from GC.")
gc.set_line_width(self.pen.line_width)
gc.set_stroke_color(self.pen.color_)
if self.filled:
gc.set_fill_color(self.pen.fill_color_)
gc.draw_path(FILL_STROKE)
else:
gc.stroke_path()
finally:
gc.restore_state()
def is_in(self, point_x, point_y):
""" Test if the point is within this ellipse """
x = self.x_origin
y = self.y_origin
a = self.e_width#/2 # FIXME: Why divide by two
b = self.e_height#/2
return ((point_x-x)**2/(a**2)) + ((point_y-y)**2/(b**2)) < 1.0
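    # Added example: with x_origin=0, y_origin=0, e_width=2 and e_height=1 the
    # point (1.0, 0.5) is inside, since 0.25 + 0.25 < 1.0.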
def _draw_bounds(self, gc):
""" Draws the component bounds for testing purposes """
dx, dy = self.bounds
x, y = self.position
gc.rect(x, y, dx, dy)
gc.stroke_path()
def normal_left_down(self, event):
""" Handles left mouse button clicks in 'normal' mode """
print "Ellipse selected at (%d, %d)" % (event.x, event.y)
# def _position_changed(self, new):
# """ Handles the position of the component changing.
# """
# x, y = new
# self.x_origin = x + self.e_width
# self.y_origin = y + self.e_height
@on_trait_change("pen.+,x_origin,y_origin,e_width,e_height,filled,container")
def _update(self):
x_origin = self.x_origin
y_origin = self.y_origin
x = x_origin - (self.e_width)
x2 = x_origin + (self.e_width)
y = y_origin-(self.e_height)
y2 = y_origin + (self.e_height)
self.position = [x, y]
# If bounds are set to 0, horizontal/vertical lines will not render.
self.bounds = [ max(x2-x, 1), max(y2-y, 1) ]
self.request_redraw()
#------------------------------------------------------------------------------
# Stand-alone call:
#------------------------------------------------------------------------------
if __name__ == "__main__":
from godot.component.component_viewer import ComponentViewer
pen = Pen()
ellipse = Ellipse(
# filled=True,
pen=pen, x_origin=200, y_origin=150, e_width=100, e_height=50,
# bounds=[50, 50], position=[0, 0]
)
from enthought.enable.api import Container
container = Container(
# fit_window=False, auto_size=True,
bounds=[200, 100], position=[100, 100],
bgcolor="green")
container.add( ellipse )
ellipse.x_origin -= container.x
ellipse.y_origin -= container.y
viewer = ComponentViewer( component=container )
from enthought.enable.primitives.api import Box
box = Box(
color="steelblue", border_color="darkorchid", border_size=1,
bounds=[50, 50], position=[50, 50]
)
viewer.canvas.add(box)
viewer.configure_traits()
# EOF -------------------------------------------------------------------------
| |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import List, Literal, Optional, TypedDict
from .activity import PartialPresenceUpdate
from .voice import GuildVoiceState
from .integration import BaseIntegration, IntegrationApplication
from .role import Role
from .channel import ChannelType, StageInstance
from .interactions import Interaction
from .invite import InviteTargetType
from .emoji import Emoji, PartialEmoji
from .member import MemberWithUser
from .snowflake import Snowflake
from .message import Message
from .sticker import GuildSticker
from .appinfo import GatewayAppInfo, PartialAppInfo
from .guild import Guild, UnavailableGuild
from .user import User
from .threads import Thread, ThreadMember
from .scheduled_event import GuildScheduledEvent
class SessionStartLimit(TypedDict):
total: int
remaining: int
reset_after: int
max_concurrency: int
class Gateway(TypedDict):
url: str
class GatewayBot(Gateway):
shards: int
session_start_limit: SessionStartLimit
class ShardInfo(TypedDict):
shard_id: int
shard_count: int
class ReadyEvent(TypedDict):
v: int
user: User
guilds: List[UnavailableGuild]
session_id: str
shard: ShardInfo
application: GatewayAppInfo
ResumedEvent = Literal[None]
MessageCreateEvent = Message
class _MessageDeleteEventOptional(TypedDict, total=False):
guild_id: Snowflake
class MessageDeleteEvent(_MessageDeleteEventOptional):
id: Snowflake
channel_id: Snowflake
class _MessageDeleteBulkEventOptional(TypedDict, total=False):
guild_id: Snowflake
class MessageDeleteBulkEvent(_MessageDeleteBulkEventOptional):
ids: List[Snowflake]
channel_id: Snowflake
class MessageUpdateEvent(Message):
channel_id: Snowflake
class _MessageReactionAddEventOptional(TypedDict, total=False):
member: MemberWithUser
guild_id: Snowflake
class MessageReactionAddEvent(_MessageReactionAddEventOptional):
user_id: Snowflake
channel_id: Snowflake
message_id: Snowflake
emoji: PartialEmoji
class _MessageReactionRemoveEventOptional(TypedDict, total=False):
guild_id: Snowflake
class MessageReactionRemoveEvent(_MessageReactionRemoveEventOptional):
user_id: Snowflake
channel_id: Snowflake
message_id: Snowflake
emoji: PartialEmoji
class _MessageReactionRemoveAllEventOptional(TypedDict, total=False):
guild_id: Snowflake
class MessageReactionRemoveAllEvent(_MessageReactionRemoveAllEventOptional):
message_id: Snowflake
channel_id: Snowflake
class _MessageReactionRemoveEmojiEventOptional(TypedDict, total=False):
guild_id: Snowflake
class MessageReactionRemoveEmojiEvent(_MessageReactionRemoveEmojiEventOptional):
emoji: PartialEmoji
message_id: Snowflake
channel_id: Snowflake
InteractionCreateEvent = Interaction
PresenceUpdateEvent = PartialPresenceUpdate
UserUpdateEvent = User
class _InviteCreateEventOptional(TypedDict, total=False):
guild_id: Snowflake
inviter: User
target_type: InviteTargetType
target_user: User
target_application: PartialAppInfo
class InviteCreateEvent(_InviteCreateEventOptional):
channel_id: Snowflake
code: str
created_at: str
max_age: int
max_uses: int
temporary: bool
uses: Literal[0]
class _InviteDeleteEventOptional(TypedDict, total=False):
guild_id: Snowflake
class InviteDeleteEvent(_InviteDeleteEventOptional):
channel_id: Snowflake
code: str
class _ChannelEvent(TypedDict):
id: Snowflake
type: ChannelType
ChannelCreateEvent = ChannelUpdateEvent = ChannelDeleteEvent = _ChannelEvent
class _ChannelPinsUpdateEventOptional(TypedDict, total=False):
guild_id: Snowflake
last_pin_timestamp: Optional[str]
class ChannelPinsUpdateEvent(_ChannelPinsUpdateEventOptional):
channel_id: Snowflake
class _ThreadCreateEventOptional(TypedDict, total=False):
newly_created: bool
members: List[ThreadMember]
class ThreadCreateEvent(Thread, _ThreadCreateEventOptional):
...
ThreadUpdateEvent = Thread
class ThreadDeleteEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
parent_id: Snowflake
type: ChannelType
class _ThreadListSyncEventOptional(TypedDict, total=False):
channel_ids: List[Snowflake]
class ThreadListSyncEvent(_ThreadListSyncEventOptional):
guild_id: Snowflake
threads: List[Thread]
members: List[ThreadMember]
class ThreadMemberUpdate(ThreadMember):
guild_id: Snowflake
class _ThreadMembersUpdateOptional(TypedDict, total=False):
added_members: List[ThreadMember]
removed_member_ids: List[Snowflake]
class ThreadMembersUpdate(_ThreadMembersUpdateOptional):
id: Snowflake
guild_id: Snowflake
member_count: int
class GuildMemberAddEvent(MemberWithUser):
guild_id: Snowflake
class GuildMemberRemoveEvent(TypedDict):
guild_id: Snowflake
user: User
class _GuildMemberUpdateEventOptional(TypedDict, total=False):
nick: str
premium_since: Optional[str]
deaf: bool
mute: bool
pending: bool
communication_disabled_until: str
class GuildMemberUpdateEvent(_GuildMemberUpdateEventOptional):
guild_id: Snowflake
roles: List[Snowflake]
user: User
avatar: Optional[str]
joined_at: Optional[str]
class GuildEmojisUpdateEvent(TypedDict):
guild_id: Snowflake
emojis: List[Emoji]
class GuildStickersUpdateEvent(TypedDict):
guild_id: Snowflake
stickers: List[GuildSticker]
GuildCreateEvent = GuildUpdateEvent = Guild
GuildDeleteEvent = UnavailableGuild
class _GuildBanEvent(TypedDict):
guild_id: Snowflake
user: User
GuildBanAddEvent = GuildBanRemoveEvent = _GuildBanEvent
class _GuildRoleEvent(TypedDict):
guild_id: Snowflake
role: Role
class GuildRoleDeleteEvent(TypedDict):
guild_id: Snowflake
role_id: Snowflake
GuildRoleCreateEvent = GuildRoleUpdateEvent = _GuildRoleEvent
class _GuildMembersChunkEventOptional(TypedDict, total=False):
not_found: List[Snowflake]
presences: List[PresenceUpdateEvent]
nonce: str
class GuildMembersChunkEvent(_GuildMembersChunkEventOptional):
guild_id: Snowflake
members: List[MemberWithUser]
chunk_index: int
chunk_count: int
class GuildIntegrationsUpdateEvent(TypedDict):
guild_id: Snowflake
class _IntegrationEventOptional(BaseIntegration, total=False):
role_id: Optional[Snowflake]
enable_emoticons: bool
subscriber_count: int
revoked: bool
application: IntegrationApplication
class _IntegrationEvent(_IntegrationEventOptional):
guild_id: Snowflake
IntegrationCreateEvent = IntegrationUpdateEvent = _IntegrationEvent
class _IntegrationDeleteEventOptional(TypedDict, total=False):
application_id: Snowflake
class IntegrationDeleteEvent(_IntegrationDeleteEventOptional):
id: Snowflake
guild_id: Snowflake
class WebhooksUpdateEvent(TypedDict):
guild_id: Snowflake
channel_id: Snowflake
StageInstanceCreateEvent = StageInstanceUpdateEvent = StageInstanceDeleteEvent = StageInstance
GuildScheduledEventCreateEvent = GuildScheduledEventUpdateEvent = GuildScheduledEventDeleteEvent = GuildScheduledEvent
class _GuildScheduledEventUsersEvent(TypedDict):
guild_scheduled_event_id: Snowflake
user_id: Snowflake
guild_id: Snowflake
GuildScheduledEventUserAdd = GuildScheduledEventUserRemove = _GuildScheduledEventUsersEvent
VoiceStateUpdateEvent = GuildVoiceState
class VoiceServerUpdateEvent(TypedDict):
token: str
guild_id: Snowflake
endpoint: Optional[str]
class _TypingStartEventOptional(TypedDict, total=False):
guild_id: Snowflake
member: MemberWithUser
class TypingStartEvent(_TypingStartEventOptional):
channel_id: Snowflake
user_id: Snowflake
timestamp: int
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Vote.vote_type'
db.add_column(u'laws_vote', 'vote_type',
self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Vote.vote_type'
db.delete_column(u'laws_vote', 'vote_type')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'committees.committee': {
'Meta': {'object_name': 'Committee'},
'aliases': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'chaired_committees'", 'blank': 'True', 'to': u"orm['mks.Member']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committees'", 'blank': 'True', 'to': u"orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'portal_knesset_broadcasts_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'replacing_in_committees'", 'blank': 'True', 'to': u"orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'committee'", 'max_length': '10'})
},
u'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': u"orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': u"orm['laws.Vote']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'events.event': {
'Meta': {'object_name': 'Event'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'when_over': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_over_guessed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'where': ('django.db.models.fields.TextField', [], {'default': "u'earth'"}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['persons.Person']", 'null': 'True', 'symmetrical': 'False'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
u'laws.bill': {
'Meta': {'ordering': "('-stage_date', '-id')", 'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': u"orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': u"orm['laws.Vote']"}),
'full_title': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'popular_name': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'popular_name_slug': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'laws.billbudgetestimation': {
'Meta': {'unique_together': "(('bill', 'estimator'),)", 'object_name': 'BillBudgetEstimation'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'budget_ests'", 'to': u"orm['laws.Bill']"}),
'estimator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'budget_ests'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'one_time_ext': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'one_time_gov': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'yearly_ext': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'yearly_gov': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'laws.candidatelistvotingstatistics': {
'Meta': {'object_name': 'CandidateListVotingStatistics'},
'candidates_list': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': u"orm['polyorg.CandidateList']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'laws.govlegislationcommitteedecision': {
'Meta': {'object_name': 'GovLegislationCommitteeDecision'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gov_decisions'", 'null': 'True', 'to': u"orm['laws.Bill']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stand': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'laws.govproposal': {
'Meta': {'object_name': 'GovProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'gov_proposal'", 'unique': 'True', 'null': 'True', 'to': u"orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'laws_govproposal_related'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['laws.Vote']"})
},
u'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'knesset_proposal'", 'unique': 'True', 'null': 'True', 'to': u"orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': u"orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'laws_knessetproposal_related'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['laws.Vote']"})
},
u'laws.law': {
'Meta': {'object_name': 'Law'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': u"orm['mks.Member']"})
},
u'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': u"orm['mks.Party']"})
},
u'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals'", 'null': 'True', 'to': u"orm['laws.Bill']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'proposals_joined'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'laws_privateproposal_related'", 'null': 'True', 'to': u"orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'proposals_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['laws.Vote']"})
},
u'laws.vote': {
'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
'against_coalition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_own_bill': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'vote_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': u"orm['laws.VoteAction']", 'to': u"orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['laws.Vote']"})
},
u'mks.knesset': {
'Meta': {'object_name': 'Knesset'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': u"orm['mks.Party']"}),
'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': u"orm['mks.Membership']", 'to': u"orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'unique_together': "(('knesset', 'name'),)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'knesset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parties'", 'null': 'True', 'to': u"orm['mks.Knesset']"}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': u"orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['persons.Title']"}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'persons.title': {
'Meta': {'object_name': 'Title'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
u'polyorg.candidate': {
'Meta': {'ordering': "('ordinal',)", 'object_name': 'Candidate'},
'candidates_list': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.CandidateList']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordinal': ('django.db.models.fields.IntegerField', [], {}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.Party']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['persons.Person']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'polyorg.candidatelist': {
'Meta': {'object_name': 'CandidateList'},
'ballot': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'candidates': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['persons.Person']", 'null': 'True', 'through': u"orm['polyorg.Candidate']", 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mpg_html_report': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'platform': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'surplus_partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['polyorg.CandidateList']", 'null': 'True', 'blank': 'True'}),
'twitter_account': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'wikipedia_page': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'youtube_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'polyorg.party': {
'Meta': {'object_name': 'Party'},
'accepts_memberships': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
u'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': u"orm['tagging.Tag']"})
}
}
complete_apps = ['laws']
| |
'''
Wire-format data types (varint, length-prefixed string, fixed-width integers)
and a registry mapping type names to their (de)serialization classes.
'''
import struct
# this gets populated via the class decorators on the DataType subclasses
DATA_TYPE_REGISTRY = {}
def data_type(name):
    '''Decorator that:
    a) populates the DATA_TYPE_REGISTRY
    b) modifies the decorated class to include a DATA_TYPE_NAME field
    '''
    def decorator(cls):
        class NewClass(cls):
            DATA_TYPE_NAME = name
        # register the modified class, so registry entries carry DATA_TYPE_NAME
        DATA_TYPE_REGISTRY[name] = NewClass
        return NewClass
    return decorator
class DataType:
@classmethod
def default(cls):
return None
@classmethod
def to_wire(cls, data):
raise NotImplementedError('to_wire not implemented for {}'.format(cls))
@classmethod
def from_wire(cls, data, offset, fullsize):
raise NotImplementedError(
'from_wire not implemented for {}'.format(cls))
@data_type(name='varint')
class VarInt(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def from_wire(cls, data, offset, fullsize):
        '''Receives a bytearray and returns (integer value, bytes consumed).'''
acc = []
while True:
acc.insert(0, data[offset] & 0x7f)
if data[offset] & 0x80 == 0:
break
offset += 1
shifts = (len(acc) - 1) * 7
data = 0
for x in acc:
data += (x << shifts)
shifts -= 7
return data, len(acc)
@classmethod
def to_wire(cls, data):
'''Receives an integer and returns a bytearray.'''
acc = bytearray()
val = data
while val > 0x7f:
seg = val & 0x7f
rem = val >> 7
acc.append(seg | 0x80)
val = rem
acc.append(val)
return acc
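# Editor's illustrative sketch (not part of the original module): round-trips a
# value through VarInt to show the little-endian 7-bit grouping implemented
# above; 300 encodes to b'\xac\x02'.
def _varint_roundtrip_example(value=300):
    encoded = VarInt.to_wire(value)
    decoded, consumed = VarInt.from_wire(encoded, 0, len(encoded))
    assert decoded == value and consumed == len(encoded)
    return bytes(encoded)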
@data_type(name='string')
class String(DataType):
@classmethod
def default(cls):
return ''
@classmethod
def from_wire(cls, data, offset, fullsize):
# read length as varint
string_length, varint_length = VarInt.from_wire(data, offset, fullsize)
# read length bytes and convert to python string
value = data[offset + varint_length:
offset + varint_length + string_length]
value = value.decode()
return value, varint_length + string_length
@classmethod
def to_wire(cls, data):
assert isinstance(data, (type(''), type(u'')))
retval = bytearray()
encoded_string = data.encode('utf-8')
retval.extend(VarInt.to_wire(len(encoded_string)))
retval.extend(encoded_string)
return retval
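# Editor's illustrative sketch (not part of the original module): strings are
# length-prefixed with a varint, so a two-byte UTF-8 string occupies three
# bytes on the wire.
def _string_roundtrip_example(text=u'hi'):
    wire = String.to_wire(text)
    value, consumed = String.from_wire(wire, 0, len(wire))
    assert value == text and consumed == len(wire)
    return bytes(wire)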
@data_type(name='i8')
class Int8(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!b', data[offset:offset + 1])[0], 1
@classmethod
def to_wire(cls, data):
return struct.pack('!b', data)
@data_type(name='u8')
class UnsignedInt8(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def to_wire(cls, data):
return struct.pack('!B', data)
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!B', data[offset:offset + 1])[0], 1
@data_type(name='u16')
class UnsignedInt16(DataType):
    @classmethod
    def default(cls):
        return 0
    @classmethod
    def from_wire(cls, data, offset, fullsize):
        # NOTE int.from_bytes is python3 specific
        return int.from_bytes(data[offset:offset + 2], 'big', signed=False), 2
    @classmethod
    def to_wire(cls, data):
        # NOTE int.to_bytes is python3 specific
        return data.to_bytes(2, byteorder='big', signed=False)
@data_type(name='i16')
class Int16(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def from_wire(cls, data, offset, fullsize):
return int.from_bytes(data[offset:offset + 2], 'big', signed=True), 2
@classmethod
def to_wire(cls, data):
return data.to_bytes(2, byteorder='big', signed=True)
@data_type(name='i32')
class Int32(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!l', data[offset:offset + 4])[0], 4
@classmethod
def to_wire(cls, data):
return data.to_bytes(4, byteorder='big', signed=True)
@data_type(name='u32')
class UnsignedInt32(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!L', data[offset:offset + 4])[0], 4
@classmethod
def to_wire(cls, data):
return data.to_bytes(4, byteorder='big', signed=False)
@data_type(name='i64')
class Int64(DataType):
    @classmethod
    def default(cls):
        return 0
    @classmethod
    def from_wire(cls, data, offset, fullsize):
        return struct.unpack('!q', data[offset:offset + 8])[0], 8
    @classmethod
    def to_wire(cls, data):
        return struct.pack('!q', data)
@data_type(name='u64')
class UnsignedInt64(DataType):
@classmethod
def default(cls):
return 0
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!Q', data[offset:offset + 8])[0], 8
@classmethod
def to_wire(cls, data):
return struct.pack('!Q', data)
@data_type(name='entityMetadata')
class EntityMetadata(DataType):
@classmethod
def default(cls):
return None
@classmethod
def from_wire(cls, data, offset, fullsize):
# TODO
return None, 0
@classmethod
def to_wire(cls, data):
raise NotImplementedError()
@data_type(name='slot')
class Slot(DataType):
@classmethod
def default(cls):
return Slot()
@classmethod
def from_wire(cls, data, offset, fullsize):
new_offset = offset
block_id, consumed = Int16.from_wire(data, new_offset, fullsize)
new_offset += consumed
if block_id == -1:
return Slot(block_id=block_id), consumed
item_count, consumed = Int8.from_wire(data, new_offset, fullsize)
new_offset += consumed
item_damage, consumed = Int16.from_wire(data, new_offset, fullsize)
new_offset += consumed
        # we don't parse the NBT payload - just keep the raw bytes until
        # someone actually needs them
        nbt_data = data[new_offset:fullsize]
        # everything from the original offset up to fullsize has been consumed
        return Slot(
            block_id=block_id,
            item_count=item_count,
            item_damage=item_damage,
            nbt=nbt_data
        ), fullsize - offset
@classmethod
def to_wire(cls, data):
raise NotImplementedError()
def __init__(self, block_id=-1, item_count=None, item_damage=None, nbt=None):
self.block_id = block_id
self.item_count = item_count
self.item_damage = item_damage
self.nbt = nbt
@data_type(name='UUID')
class UUID(DataType):
@classmethod
def default(cls):
return (None, None)
    @classmethod
    def from_wire(cls, data, offset, fullsize):
        lower = struct.unpack('!Q', data[offset:offset + 8])[0]
        upper = struct.unpack('!Q', data[offset + 8:offset + 16])[0]
        return (upper, lower), 16
    @classmethod
    def to_wire(cls, data):
        upper, lower = data
        # write both 64-bit halves in the same order from_wire reads them
        return struct.pack('!Q', lower) + struct.pack('!Q', upper)
@data_type(name='f32')
class Float32(DataType):
@classmethod
def default(cls):
return 0.0
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!f', data[offset:offset + 4])[0], 4
@classmethod
def to_wire(cls, data):
return struct.pack('!f', data)
@data_type(name='f64')
class Float64(DataType):
@classmethod
def default(cls):
return 0.0
@classmethod
def from_wire(cls, data, offset, fullsize):
return struct.unpack('!d', data[offset:offset + 8])[0], 8
@classmethod
def to_wire(cls, data):
return struct.pack('!d', data)
@data_type(name='bool')
class Bool(DataType):
@classmethod
def default(cls):
return False
@classmethod
def from_wire(cls, data, offset, fullsize):
if data[offset] == 0x1:
return True, 1
else:
return False, 1
@classmethod
def to_wire(cls, data):
        if data is True:
            return b'\x01'
        else:
            return b'\x00'
@data_type(name='restBuffer')
class RestBuffer(DataType):
@classmethod
def default(cls):
return None
@classmethod
def from_wire(cls, data, offset, fullsize):
        return data[offset:fullsize], fullsize - offset
@data_type(name='buffer')
class Buffer(DataType):
@classmethod
def default(cls):
return None
@classmethod
def from_wire(cls, data, offset, fullsize):
# read the length (varint)
buffer_length, varint_length = VarInt.from_wire(data, offset, fullsize)
# get the rest of the buffer
return data[offset + varint_length:
offset + varint_length + buffer_length], varint_length + buffer_length
@classmethod
def to_wire(cls, data):
retval = bytearray()
retval.extend(VarInt.to_wire(len(data)))
retval.extend([x for x in data])
return retval
@data_type(name='array')
class Array(DataType):
@classmethod
def default(cls):
return None
@classmethod
def from_wire(cls, data, offset, fullsize):
# read the length (varint)
packet_length, varint_length = VarInt.from_wire(data, offset, fullsize)
        # return the raw bytes that follow the length prefix
        return data[offset + varint_length:
                    offset + varint_length + packet_length], varint_length + packet_length
@data_type(name='position')
class Position(DataType):
@classmethod
def default(cls):
return Position()
@classmethod
def to_wire(cls, position):
x = int(position.x)
y = int(position.y)
z = int(position.z)
if x < 0:
x = x - (1 << 26)
if y < 0:
y = y - (1 << 12)
if z < 0:
z = z - (1 << 26)
val = ((x & 0x3FFFFFF) << 38) | ((y & 0xFFF) << 26) | (z & 0x3FFFFFF)
return UnsignedInt64.to_wire(val)
@classmethod
def from_wire(cls, data, offset, fullsize):
value, bytes_consumed = UnsignedInt64.from_wire(data, offset, fullsize)
x = value >> 38
y = (value >> 26) & 0xFFF
z = value & 0x3ffffff
        if x >= (1 << 25):
            x = x - (1 << 26)
        if y >= (1 << 11):
            y = y - (1 << 12)
        if z >= (1 << 25):
            z = z - (1 << 26)
obj = Position(x, y, z)
return obj, bytes_consumed
def __init__(self, x=0, y=0, z=0):
self.x, self.y, self.z = (x, y, z)
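# Editor's illustrative sketch (not part of the original module): looks up a
# type by its wire name in DATA_TYPE_REGISTRY and round-trips a Position
# through the 26/12/26-bit packing implemented above.
def _registry_and_position_example():
    pos_type = DATA_TYPE_REGISTRY['position']
    wire = pos_type.to_wire(Position(100, -60, -100))
    decoded, consumed = pos_type.from_wire(wire, 0, len(wire))
    assert (decoded.x, decoded.y, decoded.z) == (100, -60, -100)
    assert consumed == 8
    return wire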
| |
"""The tests for the Group components."""
# pylint: disable=protected-access,too-many-public-methods
import unittest
from homeassistant.bootstrap import _setup_component
from homeassistant.const import (
STATE_ON, STATE_OFF, STATE_HOME, STATE_UNKNOWN, ATTR_ICON, ATTR_HIDDEN,
ATTR_ASSUMED_STATE, )
import homeassistant.components.group as group
from tests.common import get_test_home_assistant
class TestComponentsGroup(unittest.TestCase):
"""Test Group component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_group_with_mixed_groupable_states(self):
"""Try to setup a group with mixed groupable states."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('device_tracker.Paulus', STATE_HOME)
group.Group(
self.hass, 'person_and_light',
['light.Bowl', 'device_tracker.Paulus'])
self.assertEqual(
STATE_ON,
self.hass.states.get(
group.ENTITY_ID_FORMAT.format('person_and_light')).state)
def test_setup_group_with_a_non_existing_state(self):
"""Try to setup a group with a non existing state."""
self.hass.states.set('light.Bowl', STATE_ON)
grp = group.Group(
self.hass, 'light_and_nothing',
['light.Bowl', 'non.existing'])
self.assertEqual(STATE_ON, grp.state)
def test_setup_group_with_non_groupable_states(self):
"""Test setup with groups which are not groupable."""
self.hass.states.set('cast.living_room', "Plex")
self.hass.states.set('cast.bedroom', "Netflix")
grp = group.Group(
self.hass, 'chromecasts',
['cast.living_room', 'cast.bedroom'])
self.assertEqual(STATE_UNKNOWN, grp.state)
def test_setup_empty_group(self):
"""Try to setup an empty group."""
grp = group.Group(self.hass, 'nothing', [])
self.assertEqual(STATE_UNKNOWN, grp.state)
def test_monitor_group(self):
"""Test if the group keeps track of states."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
# Test if group setup in our init mode is ok
self.assertIn(test_group.entity_id, self.hass.states.entity_ids())
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_ON, group_state.state)
self.assertTrue(group_state.attributes.get(group.ATTR_AUTO))
def test_group_turns_off_if_all_off(self):
"""Test if turn off if the last device that was on turns off."""
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_OFF, group_state.state)
def test_group_turns_on_if_all_are_off_and_one_turns_on(self):
"""Test if turn on if all devices were turned off and one turns on."""
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
# Turn one on
self.hass.states.set('light.Ceiling', STATE_ON)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_ON, group_state.state)
def test_is_on(self):
"""Test is_on method."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.assertTrue(group.is_on(self.hass, test_group.entity_id))
self.hass.states.set('light.Bowl', STATE_OFF)
self.hass.pool.block_till_done()
self.assertFalse(group.is_on(self.hass, test_group.entity_id))
# Try on non existing state
self.assertFalse(group.is_on(self.hass, 'non.existing'))
def test_expand_entity_ids(self):
"""Test expand_entity_ids method."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.assertEqual(sorted(['light.ceiling', 'light.bowl']),
sorted(group.expand_entity_ids(
self.hass, [test_group.entity_id])))
def test_expand_entity_ids_does_not_return_duplicates(self):
"""Test that expand_entity_ids does not return duplicates."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.expand_entity_ids(
self.hass, [test_group.entity_id, 'light.Ceiling'])))
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.expand_entity_ids(
self.hass, ['light.bowl', test_group.entity_id])))
def test_expand_entity_ids_ignores_non_strings(self):
"""Test that non string elements in lists are ignored."""
self.assertEqual([], group.expand_entity_ids(self.hass, [5, True]))
def test_get_entity_ids(self):
"""Test get_entity_ids method."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
self.assertEqual(
['light.bowl', 'light.ceiling'],
sorted(group.get_entity_ids(self.hass, test_group.entity_id)))
def test_get_entity_ids_with_domain_filter(self):
"""Test if get_entity_ids works with a domain_filter."""
self.hass.states.set('switch.AC', STATE_OFF)
mixed_group = group.Group(
self.hass, 'mixed_group', ['light.Bowl', 'switch.AC'], False)
self.assertEqual(
['switch.ac'],
group.get_entity_ids(
self.hass, mixed_group.entity_id, domain_filter="switch"))
def test_get_entity_ids_with_non_existing_group_name(self):
"""Test get_entity_ids with a non existing group."""
self.assertEqual([], group.get_entity_ids(self.hass, 'non_existing'))
def test_get_entity_ids_with_non_group_state(self):
"""Test get_entity_ids with a non group state."""
self.assertEqual([], group.get_entity_ids(self.hass, 'switch.AC'))
def test_group_being_init_before_first_tracked_state_is_set_to_on(self):
"""Test if the groups turn on.
If no states existed and now a state it is tracking is being added
as ON.
"""
test_group = group.Group(
self.hass, 'test group', ['light.not_there_1'])
self.hass.states.set('light.not_there_1', STATE_ON)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_ON, group_state.state)
def test_group_being_init_before_first_tracked_state_is_set_to_off(self):
"""Test if the group turns off.
If no states existed and now a state it is tracking is being added
as OFF.
"""
test_group = group.Group(
self.hass, 'test group', ['light.not_there_1'])
self.hass.states.set('light.not_there_1', STATE_OFF)
self.hass.pool.block_till_done()
group_state = self.hass.states.get(test_group.entity_id)
self.assertEqual(STATE_OFF, group_state.state)
def test_setup(self):
"""Test setup method."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group', ['light.Bowl', 'light.Ceiling'], False)
_setup_component(self.hass, 'group', {'group': {
'second_group': {
'entities': 'light.Bowl, ' + test_group.entity_id,
'icon': 'mdi:work',
'view': True,
},
'test_group': 'hello.world,sensor.happy',
'empty_group': {'name': 'Empty Group', 'entities': None},
}
})
group_state = self.hass.states.get(
group.ENTITY_ID_FORMAT.format('second_group'))
self.assertEqual(STATE_ON, group_state.state)
self.assertEqual(set((test_group.entity_id, 'light.bowl')),
set(group_state.attributes['entity_id']))
self.assertIsNone(group_state.attributes.get(group.ATTR_AUTO))
self.assertEqual('mdi:work',
group_state.attributes.get(ATTR_ICON))
self.assertTrue(group_state.attributes.get(group.ATTR_VIEW))
self.assertTrue(group_state.attributes.get(ATTR_HIDDEN))
group_state = self.hass.states.get(
group.ENTITY_ID_FORMAT.format('test_group'))
self.assertEqual(STATE_UNKNOWN, group_state.state)
self.assertEqual(set(('sensor.happy', 'hello.world')),
set(group_state.attributes['entity_id']))
self.assertIsNone(group_state.attributes.get(group.ATTR_AUTO))
self.assertIsNone(group_state.attributes.get(ATTR_ICON))
self.assertIsNone(group_state.attributes.get(group.ATTR_VIEW))
self.assertIsNone(group_state.attributes.get(ATTR_HIDDEN))
def test_groups_get_unique_names(self):
"""Two groups with same name should both have a unique entity id."""
grp1 = group.Group(self.hass, 'Je suis Charlie')
grp2 = group.Group(self.hass, 'Je suis Charlie')
self.assertNotEqual(grp1.entity_id, grp2.entity_id)
def test_expand_entity_ids_expands_nested_groups(self):
"""Test if entity ids epands to nested groups."""
group.Group(self.hass, 'light', ['light.test_1', 'light.test_2'])
group.Group(self.hass, 'switch', ['switch.test_1', 'switch.test_2'])
group.Group(self.hass, 'group_of_groups', ['group.light',
'group.switch'])
self.assertEqual(
['light.test_1', 'light.test_2', 'switch.test_1', 'switch.test_2'],
sorted(group.expand_entity_ids(self.hass,
['group.group_of_groups'])))
def test_set_assumed_state_based_on_tracked(self):
"""Test assumed state."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
test_group = group.Group(
self.hass, 'init_group',
['light.Bowl', 'light.Ceiling', 'sensor.no_exist'])
state = self.hass.states.get(test_group.entity_id)
self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
self.hass.states.set('light.Bowl', STATE_ON, {
ATTR_ASSUMED_STATE: True
})
self.hass.pool.block_till_done()
state = self.hass.states.get(test_group.entity_id)
self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.pool.block_till_done()
state = self.hass.states.get(test_group.entity_id)
self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
| |
import os, sys
from traits.api \
    import HasTraits, Str, Float, Int, List, Bool, Enum, Instance, Button
from traitsui.api \
import TreeEditor, TreeNode, View, Item, VSplit, \
HGroup, VGroup, Handler, Group, Tabbed,spring
from traitsui.menu \
import Menu, Action, Separator
from traitsui.wx.tree_editor \
import NewAction, CopyAction, CutAction, \
PasteAction, DeleteAction, RenameAction
import parameters as par
import numpy as n
import general
#TODO: Maybe Xxx_Params classes should hold an array of xxxParams objects (e.g. ptvParams, calibParams etc.), so the correct path is always read/written
#Answer: I do not understand the question, Alex.
#define handler function for main parameters
class ParamHandler(Handler):
def closed(self,info,is_ok):
mainParams = info.object
par_path = mainParams.par_path
Handler.closed(self,info,is_ok)
if is_ok:
img_name = [mainParams.Name_1_Image, mainParams.Name_2_Image,\
mainParams.Name_3_Image, mainParams.Name_4_Image]
img_cal_name = [mainParams.Cali_1_Image, mainParams.Cali_2_Image,\
mainParams.Cali_3_Image,mainParams.Cali_4_Image]
gvthres = [mainParams.Gray_Tresh_1,mainParams.Gray_Tresh_2,\
mainParams.Gray_Tresh_3,mainParams.Gray_Tresh_4]
base_name = [mainParams.Basename_1_Seq, mainParams.Basename_2_Seq,\
mainParams.Basename_3_Seq, mainParams.Basename_4_Seq]
X_lay = [mainParams.Xmin, mainParams.Xmax]
Zmin_lay = [mainParams.Zmin1, mainParams.Zmin2]
Zmax_lay = [mainParams.Zmax1, mainParams.Zmax2]
#write ptv_par
par.PtvParams(mainParams.Num_Cam, img_name, img_cal_name,\
mainParams.HighPass, mainParams.Accept_OnlyAllCameras,\
mainParams.tiff_flag, mainParams.imx, mainParams.imy,\
mainParams.pix_x, mainParams.pix_y, mainParams.chfield,\
mainParams.Refr_Air, mainParams.Refr_Glass, \
mainParams.Refr_Water, mainParams.Thick_Glass, path = par_path).write()
#write calibration parameters
par.CalOriParams(mainParams.Num_Cam,mainParams.fixp_name,\
mainParams.img_cal_name,mainParams.img_ori,mainParams.tiff_flag,\
mainParams.pair_Flag,mainParams.chfield, path = par_path).write()
#write targ_rec_par
par.TargRecParams(mainParams.Num_Cam, gvthres, mainParams.Tol_Disc,\
mainParams.Min_Npix ,mainParams.Max_Npix,\
mainParams.Min_Npix_x, mainParams.Max_Npix_x,\
mainParams.Min_Npix_y, mainParams.Max_Npix_y,\
mainParams.Sum_Grey, mainParams.Size_Cross, path = par_path).write()
#write pft_version_par
par.PftVersionParams(mainParams.Existing_Target, path = par_path).write()
#write sequence_par
par.SequenceParams(mainParams.Num_Cam, base_name,\
mainParams.Seq_First, mainParams.Seq_Last, path = par_path).write()
#write criteria_par
par.CriteriaParams(X_lay,Zmin_lay,Zmax_lay,mainParams.Min_Corr_nx,\
mainParams.Min_Corr_ny,mainParams.Min_Corr_npix ,\
mainParams.Sum_gv,mainParams.Min_Weight_corr ,\
mainParams.Tol_Band, path = par_path).write()
#define handler function for calibration parameters
class CalHandler(Handler):
def closed(self,info,is_ok):
calibParams = info.object
par_path = calibParams.par_path
Handler.closed(self,info,is_ok)
if is_ok:
img_cal_name = [calibParams.cam_1, calibParams.cam_2,calibParams.cam_3,calibParams.cam_4]
img_ori =[calibParams.ori_cam_1,calibParams.ori_cam_2,calibParams.ori_cam_3,calibParams.ori_cam_4]
nr1 = [calibParams.img_1_p1,calibParams.img_1_p2,calibParams.img_1_p3,calibParams.img_1_p4]
nr2 = [calibParams.img_2_p1,calibParams.img_2_p2,calibParams.img_2_p3,calibParams.img_2_p4]
nr3 = [calibParams.img_3_p1,calibParams.img_3_p2,calibParams.img_3_p3,calibParams.img_3_p4]
nr4 = [calibParams.img_4_p1,calibParams.img_4_p2,calibParams.img_4_p3,calibParams.img_4_p4]
nr = [nr1, nr2, nr3, nr4]
if(calibParams.chfield == "Frame"):
chfield = 0
elif (calibParams.chfield == "Field odd"):
chfield = 1
else :
chfield = 2
par.PtvParams(calibParams.n_img, calibParams.img_name,\
calibParams.img_cal, calibParams.hp_flag,\
calibParams.allCam_flag, calibParams.tiff_head, calibParams.h_image_size, \
calibParams.v_image_size,calibParams.h_pixel_size, calibParams.v_pixel_size, chfield,\
calibParams.mmp_n1, calibParams.mmp_n2, \
calibParams.mmp_n3, calibParams.mmp_d, path = par_path).write()
par.CalOriParams(calibParams.n_img,calibParams.fixp_name,\
img_cal_name,img_ori,calibParams.tiff_head,\
calibParams.pair_head, chfield, path = par_path).write()
par.DetectPlateParams(calibParams.grey_value_treshold_1, \
calibParams.grey_value_treshold_2, \
calibParams.grey_value_treshold_3, \
calibParams.grey_value_treshold_4, \
calibParams.tolerable_discontinuity, \
calibParams.min_npix, calibParams.max_npix, \
calibParams.min_npix_x, calibParams.max_npix_x, \
calibParams.min_npix_y, calibParams.max_npix_y, \
calibParams.sum_of_grey, \
calibParams.size_of_crosses, path = par_path).write()
par.ManOriParams(calibParams.n_img, 4, nr, path = par_path).write()
par.ExamineParams(calibParams.Examine_Flag,calibParams.Combine_Flag, path = par_path).write()
par.OrientParams(calibParams.point_number_of_orientation, calibParams.principle_distance,\
calibParams.xp, calibParams.yp, calibParams.k1, calibParams.k2,\
calibParams.k3, calibParams.p1, calibParams.p2,\
calibParams.scx, calibParams.she,calibParams.interf, path = par_path).write()
par.ShakingParams(calibParams.shaking_first_frame, calibParams.shaking_last_frame, \
calibParams.shaking_max_num_points, calibParams.shaking_max_num_frames, path = par_path).write()
par.DumbbellParams(calibParams.dumbbell_eps,calibParams.dumbbell_scale,\
calibParams.dumbbell_gradient_descent,calibParams.dumbbell_penalty_weight,\
calibParams.dumbbell_step,calibParams.dumbbell_niter, path = par_path).write()
class TrackHandler(Handler):
def closed(self,info,is_ok):
Handler.closed(self,info,is_ok)
if is_ok:
print "Michael:", info.object.dvxmin, type(info.object.dvxmin)
info.object.write()
#This is the view class of the Tracking Parameters window
class Tracking_Params(par.TrackingParams):
Tracking_Params_View = View( HGroup( Item(name='dvxmin', label='dvxmin:'), Item(name='dvxmax', label='dvxmax:') ),
HGroup( Item(name='dvymin', label='dvymin:'), Item(name='dvymax', label='dvymax:') ),
HGroup( Item(name='dvzmin', label='dvzmin:'), Item(name='dvzmax', label='dvzmax:') ),
VGroup( Item(name='angle', label='angle [gon]:'), Item(name='dacc', label='dacc:') ),
Item(name='flagNewParticles', label='Add new particles position'),
buttons = [ 'Undo', 'OK', 'Cancel' ],
handler = TrackHandler(),
title = 'Tracking Parameters')
def __init__(self, par_path):
self.par_path = par_path
par.TrackingParams.__init__(self, path = par_path)
self.read()
class Main_Params (HasTraits):
#loading parameters files:
# read main parameters
# Panel 1: General
Num_Cam = Int(4,label='Number of cameras: ')
Accept_OnlyAllCameras=Bool(False, label='Accept only points seen from all cameras?')
pair_Flag = Bool(False, label = "Include pairs")
pair_enable_flag = Bool(True)
all_enable_flag = Bool(True)
hp_enable_flag = Bool(True)
# add here also size of the images, e.g. 1280 x 1024 pix and the size of the pixels.
# future option: name of the camera from the list with these parameters saved once somewhere, e.g.
# Mikrotron EoSense (1280 x 1024, 12 micron pixels)
# Future - this should be kind of more flexible, e.g.
# select only some name structure: CamX.YYYYY is clear that the X should be 1-Num_Cam and YYYY should be
# the running counter of the images. or Cam.X_00YYY.TIFF is also kind of clear that we have 5 digits with
# same could be for calibration, we have no point to create different names for 4 cameras:
# calX_run3 will be fine as a base name and X is 1 - Num_Cam
# not clear yet how to use the variable name later. probably we need to build it as a structure
    # and use it as: for cam in range(Num_Cam):
    #     Name_Pre_Image[cam] = '{}{}.{}'.format(BaseName, cam + 1, counter)
#
#unused parameters
#TODO: then why are they here?
#Answer: historical reasons, back compatibility
tiff_flag = Bool()
imx = Int()
imy = Int()
pix_x = Float()
pix_y = Float()
chfield = Int()
img_cal_name=[]
    # unused for calibration
fixp_name = Str()
img_ori = []
Name_1_Image=Str("",label='Name of 1. image')
Name_2_Image=Str("",label='Name of 2. image')
Name_3_Image=Str("",label='Name of 3. image')
Name_4_Image=Str("",label='Name of 4. image')
Cali_1_Image=Str("",label='Calibration data for 1. image')
Cali_2_Image=Str("",label='Calibration data for 2. image')
Cali_3_Image=Str("",label='Calibration data for 3. image')
Cali_4_Image=Str("",label='Calibration data for 4. image')
# TiffHeader=Bool(True,label='Tiff header') -> probably obsolete for the Python imread () function
# FrameType=Enum('Frame','Field-odd','Field-even') -> obsolete
# future option: List -> Select Media 1 (for each one): {'Air','Glass','Water','Custom'}, etc.
Refr_Air=Float(1.0,label='Air:')
    Refr_Glass=Float(1.46,label='Glass:')
    Refr_Water=Float(1.33, label='Water:')
Thick_Glass=Float(1.0,label='Thickness of glass:')
# New panel 2: ImageProcessing
HighPass=Bool(True,label='High pass filter')
# future option: Slider between 0 and 1 for each one
Gray_Tresh_1=Int('', label='1st image')
Gray_Tresh_2=Int('', label='2nd image')
Gray_Tresh_3=Int('', label='3rd image')
Gray_Tresh_4=Int('', label='4th image')
Min_Npix=Int('',label='min npix')
Max_Npix=Int('',label='max npix')
Min_Npix_x=Int('',label='min npix x')
Max_Npix_x=Int('',label='max npix x')
Min_Npix_y=Int('',label='min npix y')
Max_Npix_y=Int('',label='max npix y')
Sum_Grey=Int('', label='Sum of grey value')
Tol_Disc=Int('',label='Tolerable discontinuity')
Size_Cross=Int(2,label='Size of crosses')
Subtr_Mask=Bool(False,label='Subtract mask')
Base_Name_Mask=Str('',label='Base name for the mask')
Existing_Target = Bool(False, label = 'Use existing_target files?')
# New panel 3: Sequence
Seq_First=Int('', label='First sequence image:')
Seq_Last=Int('',label='Last sequence image:')
Basename_1_Seq=Str('',label='Basename for 1. sequence')
Basename_2_Seq=Str('',label='Basename for 2. sequence')
Basename_3_Seq=Str('',label='Basename for 3. sequence')
Basename_4_Seq=Str('',label='Basename for 4. sequence')
# Panel 4: ObservationVolume
Xmin=Int('',label='Xmin')
Xmax=Int('',label='Xmax')
Zmin1=Int('',label='Zmin')
Zmin2=Int('',label='Zmin')
Zmax1=Int('',label='Zmax')
Zmax2=Int('',label='Zmax')
# Panel 5: ParticleDetection
Min_Corr_nx=Float('',label='min corr for ratio nx')
Min_Corr_ny=Float('',label='min corr for ratio ny')
Min_Corr_npix=Float('',label='min corr for ratio npix')
Sum_gv=Float('',label='sum of gv')
Min_Weight_corr=Float('',label='min for weighted correlation')
    Tol_Band=Float('',label='Tolerance of epipolar band [mm]')
# Group 1 is the group of General parameters
# number of cameras, use only quadruplets or also triplets/pairs?
# names of the test images, calibration files
Group1 = Group(
Group(
Item(name='Num_Cam',width=30),
Item(name='Accept_OnlyAllCameras',enabled_when='all_enable_flag'),
Item(name = 'pair_Flag',enabled_when='pair_enable_flag'),
Item(name ='HighPass',enabled_when='hp_enable_flag'),
orientation='horizontal'
),
Group(
Group(
Item(name='Name_1_Image',width=150),
Item(name='Name_2_Image'),
Item(name='Name_3_Image'),
Item(name='Name_4_Image'),
orientation='vertical'
),
Group(
Item(name='Cali_1_Image',width=150),
Item(name='Cali_2_Image'),
Item(name='Cali_3_Image'),
Item(name='Cali_4_Image'),
orientation='vertical'
),
orientation='horizontal'
),
orientation='vertical',
label = 'General'
)
Group2 = Group(
Group(
Item(name='Refr_Air'),
Item(name='Refr_Glass'),
Item(name='Refr_Water'),
Item(name='Thick_Glass'),
orientation='horizontal'),
label='Refractive Indices',
show_border=True,
orientation='vertical')
Group3 = Group(
Group(
Item(name='Gray_Tresh_1'),
Item(name='Gray_Tresh_2'),
Item(name='Gray_Tresh_3'),
Item(name='Gray_Tresh_4'),
            label='Gray value threshold: ',
show_border=True,
orientation='horizontal'
),
Group(
Group(
Item(name="Min_Npix"),
Item(name='Max_Npix'),
Item(name='Sum_Grey'),
orientation='vertical'
),
Group(
Item(name='Min_Npix_x'),
Item(name='Max_Npix_x'),
Item(name='Tol_Disc'),
orientation='vertical'
),
Group(
Item(name='Min_Npix_y'),
Item(name='Max_Npix_y'),
Item(name='Size_Cross'),
orientation='vertical'
),
orientation = 'horizontal'),
Group(
Item(name='Subtr_Mask'),
Item(name='Base_Name_Mask'),
Item(name='Existing_Target'),
orientation='horizontal'
),
orientation = 'vertical',
show_border=True,
label='Particle recognition')
Group4 = Group(
Group(
Item(name='Seq_First'),
Item(name='Seq_Last'),
orientation='horizontal'
),
Group(
Item(name='Basename_1_Seq'),
Item(name='Basename_2_Seq'),
Item(name='Basename_3_Seq'),
Item(name='Basename_4_Seq'),
orientation='vertical'
),
label='Parameters for sequence processing',
orientation='vertical',
show_border=True
)
Group5 = Group(
Group(
Item(name='Xmin'),
Item(name='Xmax'),
orientation='vertical'
),
Group(
Item(name='Zmin1'),
Item(name='Zmin2'),
orientation='vertical'
),
Group(
Item(name='Zmax1'),
Item(name='Zmax2'),
orientation='vertical'
),
orientation='horizontal',
label='Observation Volume',
show_border=True
)
Group6 = Group(
Group(
Item(name='Min_Corr_nx'),
Item(name='Min_Corr_npix'),
Item(name='Min_Weight_corr'),
orientation='vertical'
),
Group(
Item(name='Min_Corr_ny'),
Item(name='Sum_gv'),
Item(name='Tol_Band'),
orientation='vertical'
),
orientation='horizontal',
label='Criteria for correspondences',
show_border=True
)
Main_Params_View = View(
Tabbed(
Group1,
Group2,
Group3 ,
Group4,
Group5,
Group6),
resizable = True,
width = 0.5,
height = 0.3,
dock = 'horizontal',
buttons = [ 'Undo', 'OK', 'Cancel' ],
handler = ParamHandler(),
title='Main Parameters')
def _pair_Flag_fired(self):
#print("test")
if self.pair_Flag:
self.all_enable_flag = False
else:
self.all_enable_flag = True
def _Accept_OnlyAllCameras_fired(self):
if self.Accept_OnlyAllCameras:
self.pair_enable_flag = False
else:
self.pair_enable_flag = True
#TODO: underscore in Python signifies a private method (i.e. it shouldn't be accessed from outside this module).
# Answer: change it to the proper names. here it probably means just 'reload'
def _reload(self):
#load ptv_par
ptvParams = par.PtvParams(path = self.par_path)
ptvParams.read()
(n_img, img_name, img_cal, hp_flag, allCam_flag, tiff_flag, imx, imy, pix_x, pix_y, chfield, mmp_n1, mmp_n2, mmp_n3, mmp_d) = \
(ptvParams.n_img, ptvParams.img_name, ptvParams.img_cal, ptvParams.hp_flag, ptvParams.allCam_flag, ptvParams.tiff_flag, \
ptvParams.imx, ptvParams.imy, ptvParams.pix_x, ptvParams.pix_y, ptvParams.chfield, ptvParams.mmp_n1, ptvParams.mmp_n2, ptvParams.mmp_n3, ptvParams.mmp_d)
self.Name_1_Image = img_name[0]
self.Name_2_Image = img_name[1]
self.Name_3_Image = img_name[2]
self.Name_4_Image = img_name[3]
self.Cali_1_Image = img_cal[0]
self.Cali_2_Image = img_cal[1]
self.Cali_3_Image = img_cal[2]
self.Cali_4_Image = img_cal[3]
self.Refr_Air = mmp_n1
self.Refr_Glass = mmp_n2
self.Refr_Water = mmp_n3
self.Thick_Glass = mmp_d
self.Accept_OnlyAllCameras = n.bool(allCam_flag)
self.Num_Cam = n_img
self.HighPass = n.bool(hp_flag)
#load unused
self.tiff_flag = n.bool(tiff_flag)
self.imx = imx
self.imy = imy
self.pix_x = pix_x
self.pix_y = pix_y
self.chfield = chfield
#read_calibration parameters
calOriParams = par.CalOriParams(n_img, path = self.par_path)
calOriParams.read()
(fixp_name, img_cal_name, img_ori, tiff_flag, pair_flag, chfield) = \
(calOriParams.fixp_name, calOriParams.img_cal_name, calOriParams.img_ori, \
calOriParams.tiff_flag, calOriParams.pair_flag, calOriParams.chfield)
self.pair_Flag = n.bool(pair_flag)
self.img_cal_name=img_cal_name
self.img_ori = img_ori
self.fixp_name = fixp_name
#load read_targ_rec
targRecParams = par.TargRecParams(n_img, path = self.par_path)
targRecParams.read()
(gvthres, disco, nnmin, nnmax, nxmin, nxmax, nymin, nymax, sumg_min, cr_sz) = \
(targRecParams.gvthres, targRecParams.disco, targRecParams.nnmin, targRecParams.nnmax, targRecParams.nxmin, \
targRecParams.nxmax, targRecParams.nymin, targRecParams.nymax, targRecParams.sumg_min, targRecParams.cr_sz)
self.Gray_Tresh_1 = gvthres[0]
self.Gray_Tresh_2 = gvthres[1]
self.Gray_Tresh_3 = gvthres[2]
self.Gray_Tresh_4 = gvthres[3]
self.Min_Npix = nnmin
self.Max_Npix = nnmax
self.Min_Npix_x = nxmin
self.Max_Npix_x = nxmax
self.Min_Npix_y = nymin
self.Max_Npix_y = nymax
self.Sum_Grey = sumg_min
self.Tol_Disc = disco
self.Size_Cross = cr_sz
#load pft_version
pftVersionParams = par.PftVersionParams(path = self.par_path)
pftVersionParams.read()
self.Existing_Target = n.bool(pftVersionParams.Existing_Target)
#load sequence_par
sequenceParams = par.SequenceParams(n_img, path = self.par_path)
sequenceParams.read()
(base_name, first, last) = \
(sequenceParams.base_name, sequenceParams.first, sequenceParams.last)
self.Basename_1_Seq = base_name[0]
self.Basename_2_Seq = base_name[1]
self.Basename_3_Seq = base_name[2]
self.Basename_4_Seq = base_name[3]
self.Seq_First=first
self.Seq_Last = last
#load criteria_par
criteriaParams = par.CriteriaParams(path = self.par_path)
criteriaParams.read()
(X_lay, Zmin_lay, Zmax_lay, cnx, cny, cn, csumg, corrmin, eps0) = \
(criteriaParams.X_lay, criteriaParams.Zmin_lay, criteriaParams.Zmax_lay, criteriaParams.cnx, \
criteriaParams.cny, criteriaParams.cn, criteriaParams.csumg, criteriaParams.corrmin, criteriaParams.eps0)
self.Xmin = X_lay[0]
self.Xmax = X_lay[1]
self.Zmin1 = Zmin_lay[0]
self.Zmin2 = Zmin_lay[1]
self.Zmax1 = Zmax_lay[0]
self.Zmax2 = Zmax_lay[1]
self.Min_Corr_nx = cnx
self.Min_Corr_ny = cny
self.Min_Corr_npix = cn
self.Sum_gv = csumg
self.Min_Weight_corr = corrmin
self.Tol_Band = eps0
#create initfunc
def __init__(self, par_path):
self.par_path = par_path
self._reload()
# -----------------------------------------------------------------------------
class Calib_Params(HasTraits):
    # general and unused variables
pair_enable_flag = Bool(True)
n_img = Int()
img_name = []
img_cal=[]
hp_flag = Bool()
allCam_flag = Bool()
mmp_n1 = Float()
mmp_n2 = Float()
mmp_n3 = Float()
mmp_d = Float()
#images data
cam_1 = Str("", label = 'Calibration picture camera 1')
cam_2 = Str("", label = 'Calibration picture camera 2')
cam_3 = Str("", label = 'Calibration picture camera 3')
cam_4 = Str("", label = 'Calibration picture camera 4')
ori_cam_1 = Str("",label = 'Orientation data picture camera 1')
ori_cam_2 = Str("",label = 'Orientation data picture camera 2')
ori_cam_3 = Str("",label = 'Orientation data picture camera 3')
ori_cam_4 = Str("",label = 'Orientation data picture camera 4')
fixp_name = Str("", label = 'File of Coordinates on plate')
tiff_head = Bool(True,label = 'TIFF-Header')
pair_head = Bool(True,label = 'Include pairs')
chfield = Enum("Frame", "Field odd", "Field even")
Group1_1 = Group(Item(name = 'cam_1'),
Item(name = 'cam_2'),
Item(name = 'cam_3'),
Item(name = 'cam_4'),
label = 'Calibration pictures',
show_border = True)
Group1_2 = Group(Item(name = 'ori_cam_1'),
Item(name = 'ori_cam_2'),
Item(name = 'ori_cam_3'),
Item(name = 'ori_cam_4'),
label = 'Orientation data',
show_border = True)
Group1_3 = Group(Item(name = 'fixp_name'),
Group(Item(name = 'tiff_head'),
Item(name = 'pair_head', enabled_when = 'pair_enable_flag'),
Item(name = 'chfield', show_label = False,style = 'custom'),
orientation = 'vertical',
columns = 3),
orientation = 'vertical')
# Group 1 is the group of General parameters
# number of cameras, use only quadruplets or also triplets/pairs?
# names of the test images, calibration files
Group1 = Group(Group1_1, Group1_2, Group1_3,
orientation = 'vertical',
label = 'Images Data')
#calibration data detection
h_image_size = Int('',label = 'Image size horizontal')
v_image_size = Int('', label = 'Image size vertical')
h_pixel_size = Float('', label = 'Pixel size horizontal')
v_pixel_size = Float('', label = 'Pixel size vertical')
grey_value_treshold_1 = Int('', label = 'First Image')
grey_value_treshold_2 = Int('', label = 'Second Image')
grey_value_treshold_3 = Int('', label = 'Third Image')
    grey_value_treshold_4 = Int('', label = 'Fourth Image')
tolerable_discontinuity = Int('', label = 'Tolerable discontinuity')
min_npix = Int('', label = 'min npix')
min_npix_x = Int('', label = 'min npix in x')
min_npix_y = Int('', label = 'min npix in y')
max_npix = Int('', label = 'max npix')
max_npix_x = Int('', label = 'max npix in x')
max_npix_y = Int('', label = 'max npix in y')
sum_of_grey = Int('', label = 'Sum of greyvalue')
size_of_crosses = Int('', label = 'Size of crosses')
Group2_1 = Group(Item(name = 'h_image_size'),
Item(name = 'v_image_size'),
Item(name = 'h_pixel_size'),
Item(name = 'v_pixel_size'),
label = 'Image properties',
show_border = True,
orientation = 'horizontal')
Group2_2 = Group(Item(name = 'grey_value_treshold_1'),
Item(name = 'grey_value_treshold_2'),
Item(name = 'grey_value_treshold_3'),
Item(name = 'grey_value_treshold_4'),
orientation = 'horizontal',
label = 'Grayvalue threshold',
                     show_border = True)
Group2_3 = Group(Group(Item(name = 'min_npix'),
Item(name = 'min_npix_x'),
Item(name = 'min_npix_y'),
orientation = 'vertical'),
Group(Item(name = 'max_npix'),
Item(name = 'max_npix_x'),
Item(name = 'max_npix_y'),
orientation = 'vertical'),
Group(Item(name = 'tolerable_discontinuity'),
Item(name = 'sum_of_grey'),
Item(name = 'size_of_crosses'),
orientation = 'vertical'),
orientation = 'horizontal')
Group2 = Group(Group2_1, Group2_2, Group2_3,
orientation = 'vertical',
label = 'Calibration Data Detection')
    # manual pre-orientation
img_1_p1 = Int('',label = 'P1')
img_1_p2 = Int('',label = 'P2')
img_1_p3 = Int('',label = 'P3')
img_1_p4 = Int('',label = 'P4')
img_2_p1 = Int('',label = 'P1')
img_2_p2 = Int('',label = 'P2')
img_2_p3 = Int('',label = 'P3')
img_2_p4 = Int('',label = 'P4')
img_3_p1 = Int('',label = 'P1')
img_3_p2 = Int('',label = 'P2')
img_3_p3 = Int('',label = 'P3')
img_3_p4 = Int('',label = 'P4')
img_4_p1 = Int('',label = 'P1')
img_4_p2 = Int('',label = 'P2')
img_4_p3 = Int('',label = 'P3')
img_4_p4 = Int('',label = 'P4')
Group3_1 = Group(Item(name = 'img_1_p1'),
Item(name = 'img_1_p2'),
Item(name = 'img_1_p3'),
Item(name = 'img_1_p4'),
orientation = 'horizontal',
label = 'Image 1',
show_border = True)
Group3_2 = Group(Item(name = 'img_2_p1'),
Item(name = 'img_2_p2'),
Item(name = 'img_2_p3'),
Item(name = 'img_2_p4'),
orientation = 'horizontal',
label = 'Image 2',
show_border = True)
Group3_3 = Group(Item(name = 'img_3_p1'),
Item(name = 'img_3_p2'),
Item(name = 'img_3_p3'),
Item(name = 'img_3_p4'),
orientation = 'horizontal',
label = 'Image 3',
show_border = True)
Group3_4 = Group(Item(name = 'img_4_p1'),
Item(name = 'img_4_p2'),
Item(name = 'img_4_p3'),
Item(name = 'img_4_p4'),
orientation = 'horizontal',
label = 'Image 4',
show_border = True)
Group3 = Group(Group3_1, Group3_2, Group3_3, Group3_4,
show_border = True,label= 'Manual pre-orientation')
#calibration orientation param.
Examine_Flag = Bool('', label = 'Calibrate with different Z')
Combine_Flag = Bool('' , label = 'Combine preprocessed planes')
point_number_of_orientation = Int('', label = 'Point number of orientation')
    principle_distance = Bool(False, label = 'Principle distance')
xp = Bool(False, label = 'xp')
yp = Bool(False, label = 'yp')
k1 = Bool(False, label = 'K1')
k2 = Bool(False, label = 'K2')
k3 = Bool(False, label = 'K3')
p1 = Bool(False, label = 'P1')
p2 = Bool(False, label = 'P2')
scx = Bool(False, label = 'scx')
she = Bool(False, label = 'she')
interf = Bool(False, label='interfaces check box are available')
Group4_0 = Group(Item(name = 'Examine_Flag'),
Item(name = 'Combine_Flag'),
show_border = True)
Group4_1 = Group(Item(name= 'principle_distance'),
Item(name= 'xp'),
Item(name= 'yp'),
orientation = 'vertical', columns = 3)
Group4_2 = Group(Item(name = 'k1'),
Item(name = 'k2'),
Item(name = 'k3'),
Item(name = 'p1'),
Item(name = 'p2'),
orientation = 'vertical', columns = 5,
label = 'Lens distortion(Brown)',
show_border = True)
Group4_3 = Group(Item(name = 'scx'),
Item(name = 'she'),
orientation = 'vertical', columns = 2,
                     label = 'Affine transformation',
show_border = True)
Group4_4 = Group(Item(name = 'interf'))
Group4 = Group(Group(Group4_0,Item(name = 'point_number_of_orientation'), Group4_1,
Group4_2, Group4_3,Group4_4,
label = ' Orientation Parameters ',
show_border = True),
orientation = 'horizontal',
show_border = True,
label = 'Calibration Orientation Param.')
#dumbbell parameters
#5 eps (mm)
#46.5 dumbbell scale
#0.005 gradient descent factor
#1 weight for dumbbell penalty
#2 step size through sequence
#500 num iterations per click
dumbbell_eps = Float('', label = 'dumbbell epsilon')
dumbbell_scale = Float('', label = 'dumbbell scale')
dumbbell_gradient_descent = Float('', label = 'dumbbell gradient descent factor')
dumbbell_penalty_weight = Float('', label = 'weight for dumbbell penalty')
dumbbell_step = Int('', label = 'step size through sequence')
dumbbell_niter = Int('', label = 'number of iterations per click')
Group5 = HGroup(VGroup(Item(name = 'dumbbell_eps'),
Item(name = 'dumbbell_scale'),
Item(name = 'dumbbell_gradient_descent'),
Item(name = 'dumbbell_penalty_weight'),
Item(name = 'dumbbell_step'),
Item(name = 'dumbbell_niter')),
spring,
label = 'Dumbbell calibration parameters',
show_border = True)
# shaking parameters
# 10000 - first frame
# 10004 - last frame
# 10 - max num points used per frame
# 5 - max number of frames to track
shaking_first_frame = Int('',label='shaking first frame')
shaking_last_frame = Int('',label = 'shaking last frame')
shaking_max_num_points = Int('',label = 'shaking max num points')
shaking_max_num_frames = Int('',label = 'shaking max num frames')
Group6 = HGroup(VGroup(Item(name = 'shaking_first_frame',),
Item(name = 'shaking_last_frame'),
Item(name = 'shaking_max_num_points'),
Item(name = 'shaking_max_num_frames')),
spring,
label = 'Shaking calibration parameters',
show_border = True)
Calib_Params_View = View( Tabbed(Group1,
Group2,
Group3,
Group4,
Group5,
Group6),
buttons = [ 'Undo', 'OK', 'Cancel' ],
handler = CalHandler(),
title='Calibration Parameters')
def _reload(self):
#print("raloading")
#self.__init__(self)
#load ptv_par
ptvParams = par.PtvParams(path = self.par_path)
ptvParams.read()
(n_img, img_name, img_cal, hp_flag, allCam_flag, tiff_flag, imx, imy, pix_x, pix_y, chfield, mmp_n1, mmp_n2, mmp_n3, mmp_d) = \
(ptvParams.n_img, ptvParams.img_name, ptvParams.img_cal, ptvParams.hp_flag, ptvParams.allCam_flag, ptvParams.tiff_flag, \
ptvParams.imx, ptvParams.imy, ptvParams.pix_x, ptvParams.pix_y, ptvParams.chfield, ptvParams.mmp_n1, ptvParams.mmp_n2, ptvParams.mmp_n3, ptvParams.mmp_d)
#read picture size parameters
self.h_image_size = imx
self.v_image_size = imy
self.h_pixel_size = pix_x
self.v_pixel_size = pix_y
self.img_cal=img_cal
if allCam_flag:
self.pair_enable_flag = False
else:
self.pair_enable_flag = True
        # unused parameters
self.n_img = n_img
self.img_name = img_name
self.hp_flag = n.bool(hp_flag)
self.allCam_flag = n.bool(allCam_flag)
self.mmp_n1 = mmp_n1
self.mmp_n2 = mmp_n2
self.mmp_n3 = mmp_n3
self.mmp_d = mmp_d
#read_calibration parameters
calOriParams = par.CalOriParams(n_img, path = self.par_path)
calOriParams.read()
(fixp_name, img_cal_name, img_ori, tiff_flag, pair_flag, chfield) = \
(calOriParams.fixp_name, calOriParams.img_cal_name, calOriParams.img_ori, calOriParams.tiff_flag, calOriParams.pair_flag, calOriParams.chfield)
self.cam_1 = img_cal_name[0]
self.cam_2 = img_cal_name[1]
self.cam_3 = img_cal_name[2]
self.cam_4 = img_cal_name[3]
self.ori_cam_1 = img_ori[0]
self.ori_cam_2 = img_ori[1]
self.ori_cam_3 = img_ori[2]
self.ori_cam_4 = img_ori[3]
self.tiff_head = n.bool(tiff_flag)
self.pair_head = n.bool(pair_flag)
self.fixp_name = fixp_name
if chfield == 0 :
self.chfield = "Frame"
elif chfield == 1:
self.chfield = "Field odd"
else :
self.chfield = "Field even"
#read detect plate parameters
detectPlateParams = par.DetectPlateParams(path = self.par_path)
detectPlateParams.read()
(gv_th1, gv_th2, gv_th3, gv_th4,tolerable_discontinuity, min_npix, max_npix, min_npix_x, \
max_npix_x, min_npix_y, max_npix_y, sum_of_grey, size_of_crosses) = \
(detectPlateParams.gvth_1, detectPlateParams.gvth_2, detectPlateParams.gvth_3, detectPlateParams.gvth_4, \
detectPlateParams.tol_dis, detectPlateParams.min_npix, detectPlateParams.max_npix, detectPlateParams.min_npix_x, \
detectPlateParams.max_npix_x, detectPlateParams.min_npix_y, detectPlateParams.max_npix_y, detectPlateParams.sum_grey, \
detectPlateParams.size_cross)
self.grey_value_treshold_1 = gv_th1
self.grey_value_treshold_2 = gv_th2
self.grey_value_treshold_3 = gv_th3
self.grey_value_treshold_4 = gv_th4
self.tolerable_discontinuity = tolerable_discontinuity
self.min_npix = min_npix
self.min_npix_x = min_npix_x
self.min_npix_y = min_npix_y
self.max_npix = max_npix
self.max_npix_x = max_npix_x
self.max_npix_y = max_npix_y
self.sum_of_grey = sum_of_grey
self.size_of_crosses = size_of_crosses
        # read manual orientation parameters
manOriParams = par.ManOriParams(n_img, 4, path = self.par_path)
manOriParams.read()
nr = manOriParams.nr
self.img_1_p1 = nr[0][0]
self.img_1_p2 = nr[0][1]
self.img_1_p3 = nr[0][2]
self.img_1_p4 = nr[0][3]
self.img_2_p1 = nr[1][0]
self.img_2_p2 = nr[1][1]
self.img_2_p3 = nr[1][2]
self.img_2_p4 = nr[1][3]
self.img_3_p1 = nr[2][0]
self.img_3_p2 = nr[2][1]
self.img_3_p3 = nr[2][2]
self.img_3_p4 = nr[2][3]
self.img_4_p1 = nr[3][0]
self.img_4_p2 = nr[3][1]
self.img_4_p3 = nr[3][2]
self.img_4_p4 = nr[3][3]
        # examine parameters
examineParams = par.ExamineParams(path = self.par_path)
examineParams.read()
(self.Examine_Flag, self.Combine_Flag) = (examineParams.Examine_Flag, examineParams.Combine_Flag)
# orientation parameters
orientParams = par.OrientParams(path = self.par_path)
orientParams.read()
(po_num_of_ori, pri_dist, xp, yp, k1, k2, k3, p1, p2, scx, she, interf) = \
(orientParams.pnfo, orientParams.prin_dis, orientParams.xp, orientParams.yp, orientParams.k1, orientParams.k2, orientParams.k3, \
orientParams.p1, orientParams.p2, orientParams.scx, orientParams.she, orientParams.interf)
self.point_number_of_orientation = po_num_of_ori
self.principle_distance = n.bool(pri_dist)
self.xp = n.bool(xp)
self.yp = n.bool(yp)
self.k1 = n.bool(k1)
self.k2 = n.bool(k2)
self.k3 = n.bool(k3)
self.p1 = n.bool(p1)
self.p2 = n.bool(p2)
self.scx = n.bool(scx)
self.she = n.bool(she)
self.interf = n.bool(interf)
dumbbellParams = par.DumbbellParams(path = self.par_path)
dumbbellParams.read()
(self.dumbbell_eps, self.dumbbell_scale, self.dumbbell_gradient_descent, \
self.dumbbell_penalty_weight, self.dumbbell_step, self.dumbbell_niter) = \
(dumbbellParams.dumbbell_eps, dumbbellParams.dumbbell_scale, \
dumbbellParams.dumbbell_gradient_descent, dumbbellParams.dumbbell_penalty_weight, \
dumbbellParams.dumbbell_step, dumbbellParams.dumbbell_niter)
shakingParams = par.ShakingParams(path = self.par_path)
shakingParams.read()
(self.shaking_first_frame, self.shaking_last_frame, self.shaking_max_num_points, \
self.shaking_max_num_frames) = (shakingParams.shaking_first_frame, shakingParams.shaking_last_frame, \
shakingParams.shaking_max_num_points, shakingParams.shaking_max_num_frames)
def __init__(self, par_path):
self.par_path = par_path
self._reload()
# ---------------------------------------------------------------------------
class Paramset (HasTraits):
name = Str
par_path = Str
m_params = Instance(Main_Params)
c_params = Instance(Calib_Params)
t_params = Instance(Tracking_Params)
class Experiment (HasTraits):
active_params = Instance(Paramset)
paramsets = List(Paramset)
def __init__(self):
self.changed_active_params = False
pass
def getParamsetIdx(self, paramset):
        if isinstance(paramset, int):  # integer index of the paramset
            return paramset
        else:  # value is an instance of Paramset
            return self.paramsets.index(paramset)
def addParamset(self, name, par_path):
self.paramsets.append(Paramset(name=name, par_path=par_path, \
m_params=Main_Params(par_path=par_path), c_params=Calib_Params(par_path=par_path), \
t_params=Tracking_Params(par_path=par_path)))
def removeParamset(self, paramset):
paramset_idx = self.getParamsetIdx(paramset)
self.paramsets.remove(self.paramsets[paramset_idx])
def nParamsets(self):
return len(self.paramsets)
def setActive(self, paramset):
paramset_idx = self.getParamsetIdx(paramset)
self.active_params = self.paramsets[paramset_idx]
self.paramsets.pop(paramset_idx)
self.paramsets.insert(0,self.active_params)
self.syncActiveDir()
def syncActiveDir(self):
par.copy_params_dir(self.active_params.par_path, par.temp_path)
def populate_runs(self, exp_path):
#Read all parameters directories from an experiment directory
print "inside populate runs"
self.paramsets = []
dir_contents = [f for f in os.listdir(exp_path) if os.path.isdir(os.path.join(exp_path, f))]
dir_contents = [f for f in dir_contents if f.startswith(general.par_dir_prefix)]
if len(dir_contents) == 1 and dir_contents[0] == general.par_dir_prefix:
# single parameters directory, backward compatibility
exp_name = 'Run1'
par.copy_params_dir(dir_contents[0],dir_contents[0]+exp_name)
dir_contents.append(dir_contents[0]+exp_name)
for dir_item in dir_contents:
par_path = os.path.join(exp_path, dir_item)
if dir_item != general.par_dir_prefix:
#This should be a params dir, add a tree entry for it.
exp_name = dir_item[len(general.par_dir_prefix):]
self.addParamset(exp_name, par_path)
if not self.changed_active_params:
if self.nParamsets() > 0:
self.setActive(0)
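# Editor's illustrative sketch (not part of the original pyptv code): the
# intended flow of the Experiment API defined above. 'exp_path' is a
# hypothetical experiment directory containing one or more parameter run folders.
def _example_experiment_usage(exp_path):
    exp = Experiment()
    exp.populate_runs(exp_path)   # scan run directories into paramsets
    if exp.nParamsets() > 0:
        exp.setActive(0)          # first run becomes active; its params are copied to par.temp_path
    return exp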
| |
#!/usr/bin/env python
"""Distutils based setup script for SymPy.
This uses Distutils (http://python.org/sigs/distutils-sig/), the standard
Python mechanism for installing packages. For the easiest installation
just type the command (you'll probably need root privileges for that):
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of:
python setup.py --help install
In addition, there are some other commands:
python setup.py clean -> will clean all trash (*.pyc and stuff)
python setup.py test -> will run the complete test suite
python setup.py bench -> will run the complete benchmark suite
python setup.py audit -> will run pyflakes checker on source code
To get a full list of available commands, read the output of:
python setup.py --help-commands
Or, if all else fails, feel free to write to the sympy list at
sympy@googlegroups.com and ask for help.
"""
from distutils.core import setup
from distutils.core import Command
import sys
import sympy
from sympy.utilities.runtests import test, doctest
# Make sure I have the right Python version.
if sys.version_info[:2] < (2,4):
print "Sympy requires Python 2.4 or newer. Python %d.%d detected" % \
sys.version_info[:2]
sys.exit(-1)
# Check that this list is up to date against the result of the command (you can
# omit the thirdparty/ dir):
# $ find * -name __init__.py |sort
modules = [
'sympy.assumptions',
'sympy.assumptions.handlers',
'sympy.concrete',
'sympy.core',
'sympy.functions',
'sympy.functions.combinatorial',
'sympy.functions.elementary',
'sympy.functions.special',
'sympy.galgebra',
'sympy.geometry',
'sympy.integrals',
'sympy.interactive',
'sympy.matrices',
'sympy.ntheory',
'sympy.parsing',
'sympy.physics',
'sympy.plotting',
'sympy.tensor',
'sympy.thirdparty',
'sympy.logic',
'sympy.logic.algorithms',
'sympy.logic.utilities',
'sympy.mpmath',
'sympy.mpmath.libmp',
'sympy.mpmath.functions',
'sympy.mpmath.matrices',
'sympy.mpmath.calculus',
'sympy.polys',
'sympy.printing',
'sympy.printing.pretty',
'sympy.series',
'sympy.simplify',
'sympy.solvers',
'sympy.statistics',
'sympy.utilities',
'sympy.utilities.mathml',
]
class audit(Command):
"""Audits Sympy's source code for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit Sympy source with PyFlakes"
user_options = []
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
try:
import pyflakes.scripts.pyflakes as flakes
        except ImportError:
print """In order to run the audit, you need to have PyFlakes installed."""
sys.exit(-1)
dirs = [os.path.join(*i.split('.')) for i in modules]
warns = 0
for dir in dirs:
filenames = os.listdir(dir)
for filename in filenames:
if filename.endswith('.py') and filename != '__init__.py':
warns += flakes.checkPath(os.path.join(dir, filename))
if warns > 0:
print ("Audit finished with total %d warnings" % warns)
class clean(Command):
"""Cleans *.pyc and debian trashs, so you should get the same copy as
is in the VCS.
"""
description = "remove build files"
user_options = [("all","a","the same")]
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
os.system("py.cleanup")
os.system("rm -f python-build-stamp-2.4")
os.system("rm -f MANIFEST")
os.system("rm -rf build")
os.system("rm -rf dist")
os.system("rm -rf doc/_build")
class test_sympy(Command):
"""Runs all tests under the sympy/ folder
"""
description = "run all tests and doctests; also see bin/test and bin/doctest"
user_options = [] # distutils complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # distutils wants this
pass
def finalize_options(self): # this too
pass
def run(self):
if test():
            # all regular tests ran successfully, so let's also run doctests
# (if some regular test fails, the doctests are not run)
doctest()
class run_benchmarks(Command):
"""Runs all SymPy benchmarks"""
description = "run all benchmarks"
user_options = [] # distutils complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # distutils wants this
pass
def finalize_options(self): # this too
pass
# we use py.test like architecture:
#
# o collector -- collects benchmarks
# o runner -- executes benchmarks
# o presenter -- displays benchmarks results
#
# this is done in sympy.utilities.benchmarking on top of py.test
def run(self):
from sympy.utilities import benchmarking
benchmarking.main(['sympy'])
# Check that this list is up to date against the result of the command:
# $ python bin/generate_test_list.py
tests = [
'sympy.assumptions.tests',
'sympy.concrete.tests',
'sympy.core.tests',
'sympy.functions.combinatorial.tests',
'sympy.functions.elementary.tests',
'sympy.functions.special.tests',
'sympy.galgebra.tests',
'sympy.geometry.tests',
'sympy.integrals.tests',
'sympy.logic.tests',
'sympy.matrices.tests',
'sympy.mpmath.tests',
'sympy.ntheory.tests',
'sympy.parsing.tests',
'sympy.physics.tests',
'sympy.plotting.tests',
'sympy.polys.tests',
'sympy.printing.pretty.tests',
'sympy.printing.tests',
'sympy.series.tests',
'sympy.simplify.tests',
'sympy.slow_tests',
'sympy.solvers.tests',
'sympy.statistics.tests',
'sympy.test_external',
'sympy.utilities.tests',
]
# update the following list from:
# http://pyglet.googlecode.com/svn/trunk/setup.py
# (whenever we update pyglet in sympy)
# try ./setup.py sdist to see if it works
pyglet_packages=[
'pyglet',
'pyglet.app',
'pyglet.font',
'pyglet.gl',
'pyglet.graphics',
'pyglet.image',
'pyglet.image.codecs',
'pyglet.media',
'pyglet.media.drivers',
'pyglet.media.drivers.directsound',
'pyglet.media.drivers.openal',
'pyglet.text',
'pyglet.text.formats',
'pyglet.window',
'pyglet.window.carbon',
'pyglet.window.win32',
'pyglet.window.xlib',
]
pyglet_packages = ["sympy.thirdparty.pyglet." + s for s in pyglet_packages]
setup(
name = 'sympy',
version = sympy.__version__,
description = 'Computer algebra system (CAS) in Python',
author = 'SymPy development team',
author_email = 'sympy@googlegroups.com',
license = 'BSD',
url = 'http://code.google.com/p/sympy',
packages = ['sympy'] + modules + tests + pyglet_packages,
scripts = ['bin/isympy'],
ext_modules = [],
package_data = { 'sympy.utilities.mathml' : ['data/*.xsl'] },
data_files = [('share/man/man1', ['doc/man/isympy.1'])],
cmdclass = {'test': test_sympy,
'bench': run_benchmarks,
'clean': clean,
'audit' : audit,
},
)
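# Usage note (added for illustration, not part of the original setup.py): the
# cmdclass mapping above registers the custom commands defined in this file,
# so they can be invoked from the repository root, e.g.:
#
#     python setup.py test     # run all tests and doctests
#     python setup.py bench    # run the SymPy benchmarks
#     python setup.py clean    # remove build files
#     python setup.py audit    # run PyFlakes over the source tree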
| |
from sys import version_info
import gdb
if version_info[0] >= 3:
xrange = range
ZERO_FIELD = "__0"
FIRST_FIELD = "__1"
def unwrap_unique_or_non_null(unique_or_nonnull):
# BACKCOMPAT: rust 1.32
# https://github.com/rust-lang/rust/commit/7a0911528058e87d22ea305695f4047572c5e067
ptr = unique_or_nonnull["pointer"]
return ptr if ptr.type.code == gdb.TYPE_CODE_PTR else ptr[ZERO_FIELD]
class EnumProvider:
def __init__(self, valobj):
content = valobj[valobj.type.fields()[0]]
fields = content.type.fields()
self.empty = len(fields) == 0
if not self.empty:
if len(fields) == 1:
discriminant = 0
else:
discriminant = int(content[fields[0]]) + 1
self.active_variant = content[fields[discriminant]]
self.name = fields[discriminant].name
self.full_name = "{}::{}".format(valobj.type.name, self.name)
else:
self.full_name = valobj.type.name
def to_string(self):
return self.full_name
def children(self):
if not self.empty:
yield self.name, self.active_variant
class StdStringProvider:
def __init__(self, valobj):
self.valobj = valobj
vec = valobj["vec"]
self.length = int(vec["len"])
self.data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"])
def to_string(self):
return self.data_ptr.lazy_string(encoding="utf-8", length=self.length)
@staticmethod
def display_hint():
return "string"
class StdOsStringProvider:
def __init__(self, valobj):
self.valobj = valobj
buf = self.valobj["inner"]["inner"]
is_windows = "Wtf8Buf" in buf.type.name
vec = buf[ZERO_FIELD] if is_windows else buf
self.length = int(vec["len"])
self.data_ptr = unwrap_unique_or_non_null(vec["buf"]["ptr"])
def to_string(self):
return self.data_ptr.lazy_string(encoding="utf-8", length=self.length)
def display_hint(self):
return "string"
class StdStrProvider:
def __init__(self, valobj):
self.valobj = valobj
self.length = int(valobj["length"])
self.data_ptr = valobj["data_ptr"]
def to_string(self):
return self.data_ptr.lazy_string(encoding="utf-8", length=self.length)
@staticmethod
def display_hint():
return "string"
def _enumerate_array_elements(element_ptrs):
for (i, element_ptr) in enumerate(element_ptrs):
key = "[{}]".format(i)
element = element_ptr.dereference()
try:
# rust-lang/rust#64343: passing deref expr to `str` allows
# catching exception on garbage pointer
str(element)
except RuntimeError:
yield key, "inaccessible"
break
yield key, element
class StdSliceProvider:
def __init__(self, valobj):
self.valobj = valobj
self.length = int(valobj["length"])
self.data_ptr = valobj["data_ptr"]
def to_string(self):
return "{}(size={})".format(self.valobj.type, self.length)
def children(self):
return _enumerate_array_elements(
self.data_ptr + index for index in xrange(self.length)
)
@staticmethod
def display_hint():
return "array"
class StdVecProvider:
def __init__(self, valobj):
self.valobj = valobj
self.length = int(valobj["len"])
self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
def to_string(self):
return "Vec(size={})".format(self.length)
def children(self):
return _enumerate_array_elements(
self.data_ptr + index for index in xrange(self.length)
)
@staticmethod
def display_hint():
return "array"
class StdVecDequeProvider:
def __init__(self, valobj):
self.valobj = valobj
self.head = int(valobj["head"])
self.tail = int(valobj["tail"])
self.cap = int(valobj["buf"]["cap"])
self.data_ptr = unwrap_unique_or_non_null(valobj["buf"]["ptr"])
if self.head >= self.tail:
self.size = self.head - self.tail
else:
self.size = self.cap + self.head - self.tail
def to_string(self):
return "VecDeque(size={})".format(self.size)
def children(self):
return _enumerate_array_elements(
(self.data_ptr + ((self.tail + index) % self.cap)) for index in xrange(self.size)
)
@staticmethod
def display_hint():
return "array"
class StdRcProvider:
def __init__(self, valobj, is_atomic=False):
self.valobj = valobj
self.is_atomic = is_atomic
self.ptr = unwrap_unique_or_non_null(valobj["ptr"])
self.value = self.ptr["data" if is_atomic else "value"]
self.strong = self.ptr["strong"]["v" if is_atomic else "value"]["value"]
self.weak = self.ptr["weak"]["v" if is_atomic else "value"]["value"] - 1
def to_string(self):
if self.is_atomic:
return "Arc(strong={}, weak={})".format(int(self.strong), int(self.weak))
else:
return "Rc(strong={}, weak={})".format(int(self.strong), int(self.weak))
def children(self):
yield "value", self.value
yield "strong", self.strong
yield "weak", self.weak
class StdCellProvider:
def __init__(self, valobj):
self.value = valobj["value"]["value"]
def to_string(self):
return "Cell"
def children(self):
yield "value", self.value
class StdRefProvider:
def __init__(self, valobj):
self.value = valobj["value"].dereference()
self.borrow = valobj["borrow"]["borrow"]["value"]["value"]
def to_string(self):
borrow = int(self.borrow)
if borrow >= 0:
return "Ref(borrow={})".format(borrow)
else:
return "Ref(borrow_mut={})".format(-borrow)
def children(self):
yield "*value", self.value
yield "borrow", self.borrow
class StdRefCellProvider:
def __init__(self, valobj):
self.value = valobj["value"]["value"]
self.borrow = valobj["borrow"]["value"]["value"]
def to_string(self):
borrow = int(self.borrow)
if borrow >= 0:
return "RefCell(borrow={})".format(borrow)
else:
return "RefCell(borrow_mut={})".format(-borrow)
def children(self):
yield "value", self.value
yield "borrow", self.borrow
# Yields children (in a provider's sense of the word) for a BTreeMap.
def children_of_btree_map(map):
# Yields each key/value pair in the node and in any child nodes.
def children_of_node(node_ptr, height):
def cast_to_internal(node):
internal_type_name = node.type.target().name.replace("LeafNode", "InternalNode", 1)
internal_type = gdb.lookup_type(internal_type_name)
return node.cast(internal_type.pointer())
if node_ptr.type.name.startswith("alloc::collections::btree::node::BoxedNode<"):
# BACKCOMPAT: rust 1.49
node_ptr = node_ptr["ptr"]
node_ptr = unwrap_unique_or_non_null(node_ptr)
leaf = node_ptr.dereference()
keys = leaf["keys"]
vals = leaf["vals"]
edges = cast_to_internal(node_ptr)["edges"] if height > 0 else None
length = leaf["len"]
for i in xrange(0, length + 1):
if height > 0:
child_ptr = edges[i]["value"]["value"]
for child in children_of_node(child_ptr, height - 1):
yield child
if i < length:
# Avoid "Cannot perform pointer math on incomplete type" on zero-sized arrays.
key_type_size = keys.type.sizeof
val_type_size = vals.type.sizeof
key = keys[i]["value"]["value"] if key_type_size > 0 else gdb.parse_and_eval("()")
val = vals[i]["value"]["value"] if val_type_size > 0 else gdb.parse_and_eval("()")
yield key, val
if map["length"] > 0:
root = map["root"]
if root.type.name.startswith("core::option::Option<"):
root = root.cast(gdb.lookup_type(root.type.name[21:-1]))
node_ptr = root["node"]
height = root["height"]
for child in children_of_node(node_ptr, height):
yield child
class StdBTreeSetProvider:
def __init__(self, valobj):
self.valobj = valobj
def to_string(self):
return "BTreeSet(size={})".format(self.valobj["map"]["length"])
def children(self):
inner_map = self.valobj["map"]
for i, (child, _) in enumerate(children_of_btree_map(inner_map)):
yield "[{}]".format(i), child
@staticmethod
def display_hint():
return "array"
class StdBTreeMapProvider:
def __init__(self, valobj):
self.valobj = valobj
def to_string(self):
return "BTreeMap(size={})".format(self.valobj["length"])
def children(self):
for i, (key, val) in enumerate(children_of_btree_map(self.valobj)):
yield "key{}".format(i), key
yield "val{}".format(i), val
@staticmethod
def display_hint():
return "map"
# BACKCOMPAT: rust 1.35
class StdOldHashMapProvider:
def __init__(self, valobj, show_values=True):
self.valobj = valobj
self.show_values = show_values
self.table = self.valobj["table"]
self.size = int(self.table["size"])
self.hashes = self.table["hashes"]
self.hash_uint_type = self.hashes.type
self.hash_uint_size = self.hashes.type.sizeof
self.modulo = 2 ** self.hash_uint_size
self.data_ptr = self.hashes[ZERO_FIELD]["pointer"]
self.capacity_mask = int(self.table["capacity_mask"])
self.capacity = (self.capacity_mask + 1) % self.modulo
marker = self.table["marker"].type
self.pair_type = marker.template_argument(0)
self.pair_type_size = self.pair_type.sizeof
self.valid_indices = []
for idx in range(self.capacity):
data_ptr = self.data_ptr.cast(self.hash_uint_type.pointer())
address = data_ptr + idx
hash_uint = address.dereference()
hash_ptr = hash_uint[ZERO_FIELD]["pointer"]
if int(hash_ptr) != 0:
self.valid_indices.append(idx)
def to_string(self):
if self.show_values:
return "HashMap(size={})".format(self.size)
else:
return "HashSet(size={})".format(self.size)
def children(self):
start = int(self.data_ptr) & ~1
hashes = self.hash_uint_size * self.capacity
align = self.pair_type_size
len_rounded_up = (((((hashes + align) % self.modulo - 1) % self.modulo) & ~(
(align - 1) % self.modulo)) % self.modulo - hashes) % self.modulo
pairs_offset = hashes + len_rounded_up
pairs_start = gdb.Value(start + pairs_offset).cast(self.pair_type.pointer())
for index in range(self.size):
table_index = self.valid_indices[index]
idx = table_index & self.capacity_mask
element = (pairs_start + idx).dereference()
if self.show_values:
yield "key{}".format(index), element[ZERO_FIELD]
yield "val{}".format(index), element[FIRST_FIELD]
else:
yield "[{}]".format(index), element[ZERO_FIELD]
def display_hint(self):
return "map" if self.show_values else "array"
class StdHashMapProvider:
def __init__(self, valobj, show_values=True):
self.valobj = valobj
self.show_values = show_values
table = self.table()
table_inner = table["table"]
capacity = int(table_inner["bucket_mask"]) + 1
ctrl = table_inner["ctrl"]["pointer"]
self.size = int(table_inner["items"])
self.pair_type = table.type.template_argument(0).strip_typedefs()
self.new_layout = not table_inner.type.has_key("data")
if self.new_layout:
self.data_ptr = ctrl.cast(self.pair_type.pointer())
else:
self.data_ptr = table_inner["data"]["pointer"]
self.valid_indices = []
for idx in range(capacity):
address = ctrl + idx
value = address.dereference()
is_presented = value & 128 == 0
if is_presented:
self.valid_indices.append(idx)
def table(self):
if self.show_values:
hashbrown_hashmap = self.valobj["base"]
elif self.valobj.type.fields()[0].name == "map":
# BACKCOMPAT: rust 1.47
# HashSet wraps std::collections::HashMap, which wraps hashbrown::HashMap
hashbrown_hashmap = self.valobj["map"]["base"]
else:
# HashSet wraps hashbrown::HashSet, which wraps hashbrown::HashMap
hashbrown_hashmap = self.valobj["base"]["map"]
return hashbrown_hashmap["table"]
def to_string(self):
if self.show_values:
return "HashMap(size={})".format(self.size)
else:
return "HashSet(size={})".format(self.size)
def children(self):
pairs_start = self.data_ptr
for index in range(self.size):
idx = self.valid_indices[index]
if self.new_layout:
idx = -(idx + 1)
element = (pairs_start + idx).dereference()
if self.show_values:
yield "key{}".format(index), element[ZERO_FIELD]
yield "val{}".format(index), element[FIRST_FIELD]
else:
yield "[{}]".format(index), element[ZERO_FIELD]
def display_hint(self):
return "map" if self.show_values else "array"
| |
# file eulxml/xpath/ast.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Abstract Syntax Tree nodes for parsed XPath.
This module contains basic nodes for representing parsed XPath expressions.
The parser provided by this module creates its parsed XPath representation
from the classes defined in this module. Library callers will mostly not use
this module directly, unless they need to produce XPath ASTs from scratch or
perhaps introspect ASTs returned by the parser.
'''
from __future__ import unicode_literals
import sys
# python2/3 string type logic borrowed from six
# NOTE: not importing six here because setup.py needs to generate
# the parser at install time, when six installation is not yet available
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
__all__ = [
'serialize',
'UnaryExpression',
'BinaryExpression',
'PredicatedExpression',
'AbsolutePath',
'Step',
'NameTest',
'NodeType',
'AbbreviatedStep',
'VariableReference',
'FunctionCall',
]
def serialize(xp_ast):
'''Serialize an XPath AST as a valid XPath expression.'''
return ''.join(_serialize(xp_ast))
def _serialize(xp_ast):
'''Generate token strings which, when joined together, form a valid
XPath serialization of the AST.'''
if hasattr(xp_ast, '_serialize'):
for tok in xp_ast._serialize():
yield tok
elif isinstance(xp_ast, string_types):
# strings in serialized xpath need to be quoted
# (e.g. for use in paths, comparisons, etc)
# using repr to quote them; for unicode, the leading
# u (u'') needs to be removed.
yield repr(xp_ast).lstrip('u')
else:
yield str(xp_ast)
class UnaryExpression(object):
'''A unary XPath expression. Practically, this means -foo.'''
def __init__(self, op, right):
self.op = op
'''the operator used in the expression'''
self.right = right
'''the expression the operator is applied to'''
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__,
self.op, serialize(self.right))
def _serialize(self):
yield self.op
for tok in _serialize(self.right):
yield tok
KEYWORDS = set(['or', 'and', 'div', 'mod'])
class BinaryExpression(object):
'''Any binary XPath expression. a/b; a and b; a | b.'''
def __init__(self, left, op, right):
self.left = left
'''the left side of the binary expression'''
self.op = op
'''the operator of the binary expression'''
self.right = right
'''the right side of the binary expression'''
def __repr__(self):
return '<%s %s %s %s>' % (self.__class__.__name__,
serialize(self.left), self.op, serialize(self.right))
def _serialize(self):
for tok in _serialize(self.left):
yield tok
if self.op in KEYWORDS:
yield ' '
yield self.op
yield ' '
else:
yield self.op
for tok in _serialize(self.right):
yield tok
class PredicatedExpression(object):
'''A filtered XPath expression. $var[1]; (a or b)[foo][@bar].'''
def __init__(self, base, predicates=None):
self.base = base
'''the base expression to be filtered'''
self.predicates = predicates or []
'''a list of filter predicates'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def append_predicate(self, pred):
self.predicates.append(pred)
def _serialize(self):
yield '('
for tok in _serialize(self.base):
yield tok
yield ')'
for pred in self.predicates:
yield '['
for tok in _serialize(pred):
yield tok
yield ']'
class AbsolutePath(object):
'''An absolute XPath path. /a/b/c; //a/ancestor:b/@c.'''
def __init__(self, op='/', relative=None):
self.op = op
'''the operator used to root the expression'''
self.relative = relative
'''the relative path after the absolute root operator'''
def __repr__(self):
if self.relative:
return '<%s %s %s>' % (self.__class__.__name__,
self.op, serialize(self.relative))
else:
return '<%s %s>' % (self.__class__.__name__, self.op)
def _serialize(self):
yield self.op
for tok in _serialize(self.relative):
yield tok
class Step(object):
'''A single step in a relative path. a; @b; text(); parent::foo:bar[5].'''
def __init__(self, axis, node_test, predicates):
self.axis = axis
'''the step's axis, or @ or None if abbreviated or undefined'''
self.node_test = node_test
'''a NameTest or NodeType object describing the test represented'''
self.predicates = predicates
'''a list of predicates filtering the step'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
if self.axis == '@':
yield '@'
elif self.axis:
yield self.axis
yield '::'
for tok in self.node_test._serialize():
yield tok
for predicate in self.predicates:
yield '['
for tok in _serialize(predicate):
yield tok
yield ']'
class NameTest(object):
'''An element name node test for a Step.'''
def __init__(self, prefix, name):
self.prefix = prefix
'''the namespace prefix used for the test, or None if unset'''
self.name = name
'''the node name used for the test, or *'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
if self.prefix:
yield self.prefix
yield ':'
yield self.name
def __str__(self):
return ''.join(self._serialize())
class NodeType(object):
'''A node type node test for a Step.'''
def __init__(self, name, literal=None):
self.name = name
'''the node type name, such as node or text'''
self.literal = literal
'''the argument to the node specifier. XPath allows these only for
processing-instruction() node tests.'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
yield self.name
yield '('
if self.literal is not None:
for tok in _serialize(self.literal):
yield tok
yield ')'
def __str__(self):
return ''.join(self._serialize())
class AbbreviatedStep(object):
'''An abbreviated XPath step. . or ..'''
def __init__(self, abbr):
self.abbr = abbr
'''the abbreviated step'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
yield self.abbr
class VariableReference(object):
'''An XPath variable reference. $foo; $myns:foo.'''
def __init__(self, name):
self.name = name
'''a tuple (prefix, localname) containing the variable name'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
yield '$'
prefix, localname = self.name
if prefix:
yield prefix
yield ':'
yield localname
class FunctionCall(object):
'''An XPath function call. foo(); my:foo(1); foo(1, 'a', $var).'''
def __init__(self, prefix, name, args):
self.prefix = prefix
'''the namespace prefix, or None if unspecified'''
self.name = name
'''the local function name'''
self.args = args
'''a list of argument expressions'''
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
serialize(self))
def _serialize(self):
if self.prefix:
yield self.prefix
yield ':'
yield self.name
yield '('
if self.args:
for tok in _serialize(self.args[0]):
yield tok
for arg in self.args[1:]:
yield ','
for tok in _serialize(arg):
yield tok
yield ')'
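# Illustrative sketch (not part of the original module): building a tiny AST by
# hand and serializing it. The shapes follow the classes above; the example
# expression itself is an assumption for demonstration only.
if __name__ == '__main__':
    title_step = Step(None, NameTest(None, 'title'), [])
    path = AbsolutePath('/', title_step)
    print(serialize(path))          # -> /title
    call = FunctionCall(None, 'contains', [path, 'Intro'])
    print(serialize(call))          # -> contains(/title,'Intro')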
| |
#!/usr/bin/env python
#
# Gaze tracking calibration
# - use calibration video heatmap and priors
#
# AUTHOR : Mike Tyszka
# PLACE : Caltech
# DATES : 2014-05-15 JMT From scratch
#
# This file is part of mrgaze.
#
# mrgaze is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mrgaze is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mrgaze. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 California Institute of Technology.
import os
import cv2
import json
import numpy as np
import pylab as plt
from skimage import filters, exposure
from scipy import ndimage
from mrgaze import moco, engine
def AutoCalibrate(ss_res_dir, cfg):
'''
Automatic calibration transform from pupil center timeseries
'''
# Get fixation heatmap percentile limits and Gaussian blur sigma
pmin = cfg.getfloat('CALIBRATION', 'heatpercmin')
pmax = cfg.getfloat('CALIBRATION', 'heatpercmax')
plims = (pmin, pmax)
sigma = cfg.getfloat('CALIBRATION', 'heatsigma')
# Get target coordinates
targetx = json.loads(cfg.get('CALIBRATION', 'targetx'))
targety = json.loads(cfg.get('CALIBRATION', 'targety'))
# Gaze space target coordinates (n x 2)
targets = np.array([targetx, targety]).transpose()
# Calibration pupilometry file
cal_pupils_csv = os.path.join(ss_res_dir,'cal_pupils.csv')
if not os.path.isfile(cal_pupils_csv):
print('* Calibration pupilometry not found - returning')
return False
# Read raw pupilometry data
p = engine.ReadPupilometry(cal_pupils_csv)
# Extract useful timeseries
t = p[:,0] # Video soft timestamp
px = p[:,2] # Video pupil center, x
py = p[:,3] # Video pupil center, y
# Remove NaNs (blinks, etc) from t, x and y
ok = np.isfinite(px)
t, x, y = t[ok], px[ok], py[ok]
# Find spatial fixations and sort temporally
# Returns heatmap with axes
fixations, hmap, xedges, yedges = FindFixations(x, y, plims, sigma)
# Temporally sort fixations - required for matching to targets
fixations = SortFixations(t, x, y, fixations)
# Plot labeled calibration heatmap to results directory
PlotCalibration(ss_res_dir, hmap, xedges, yedges, fixations)
# Check for autocalibration problems
n_targets = targets.shape[0]
n_fixations = fixations.shape[0]
if n_targets == n_fixations:
# Compute calibration mapping video to gaze space
C = CalibrationModel(fixations, targets)
# Determine central fixation coordinate in video space
central_fix = CentralFixation(fixations, targets)
# Write calibration results to CSV files in the results subdir
WriteCalibration(ss_res_dir, fixations, C, central_fix)
else:
print('* Number of detected fixations (%d) and targets (%d) differ - exiting' % (n_fixations, n_targets))
# Return empty/dummy values
C = np.array([])
central_fix = 0.0, 0.0
return C, central_fix
def FindFixations(x, y, plims=(5,95), sigma=2.0):
'''
Find fixations by blob location in pupil center heat map
Fixations returned are not time ordered
'''
# Find robust ranges
xmin, xmax = np.percentile(x, plims)
ymin, ymax = np.percentile(y, plims)
# Expand bounding box by 30%
sf = 1.30
hx, hy = (xmax - xmin) * sf * 0.5, (ymax - ymin) * sf * 0.5
cx, cy = (xmin + xmax) * 0.5, (ymin + ymax) * 0.5
xmin, xmax = cx - hx, cx + hx
ymin, ymax = cy - hy, cy + hy
# Compute calibration video heatmap
hmap, xedges, yedges = HeatMap(x, y, (xmin, xmax), (ymin, ymax), sigma)
# Heatmap dimensions
# *** Note y/row, x/col ordering
ny, nx = hmap.shape
# Determine blob threshold for heatmap
# Need to accommodate hotspots from longer fixations
# particularly at center.
# A single fixation blob shouldn't exceed 1% of total frame
# area so clamp heatmap to 99th percentile
pA, pB = np.percentile(hmap, (0, 99))
hmap = exposure.rescale_intensity(hmap, in_range = (pA, pB))
# Otsu threshold clamped heatmap
th = filters.threshold_otsu(hmap)
blobs = np.array(hmap > th, np.uint8)
# Morphological opening (circle 2 pixels diameter)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
# blobs = cv2.morphologyEx(blobs, cv2.MORPH_OPEN, kernel)
# Label connected components
labels, n_labels = ndimage.label(blobs)
# Find blob centroids
# Transpose before assigning to x and y arrays
pnts = np.array(ndimage.measurements.center_of_mass(hmap, labels, range(1, n_labels+1)))
# Parse x and y coordinates
# *** Note y/row, x/col ordering
fix_x, fix_y = pnts[:,1], pnts[:,0]
# Map blob centroids to video pixel space using xedges and yedges
# of histogram2d bins (heatmap pixels). Note that pixels are centered
# on their coordinates when rendered by imshow. So a pixel at (1,2) is
# rendered as a rectangle with corners at (0.5,1.5) and (1.5, 2.5)
fix_xi = np.interp(fix_x, np.linspace(-0.5, nx-0.5, nx+1), xedges)
fix_yi = np.interp(fix_y, np.linspace(-0.5, ny-0.5, ny+1), yedges)
# Fixation centroids (n x 2)
fixations = np.array((fix_xi, fix_yi)).T
return fixations, hmap, xedges, yedges
def SortFixations(t, x, y, fixations):
'''
Temporally sort detected spatial fixations
Arguments
----
t : float vector
Sample time points in seconds
x : float vector
Pupil center x coordinate timeseries
y : float vector
Pupil center y coordinate timeseries
fixations : n x 2 float array
Detected spatial fixation coordinates
Returns
----
fixations_sorted : n x 2 float array
Spatial fixations sorted temporally
'''
# Count number of fixations and timepoints
nt = x.shape[0]
nf = fixations.shape[0]
# Put coordinate timeseries in columns
X = np.zeros([nt,2])
X[:,0] = x
X[:,1] = y
# Map each pupil center to nearest fixation
idx = NearestFixation(X, fixations)
# Median time of each fixation
t_fix = np.zeros(nf)
for fc in np.arange(0,nf):
t_fix[fc] = np.median(t[idx==fc])
# Temporally sort fixations
fix_order = np.argsort(t_fix)
fixations_sorted = fixations[fix_order,:]
return fixations_sorted
def NearestFixation(X, fixations):
'''
Map pupil centers to index of nearest fixation
'''
# Number of time points and fixations
nt = X.shape[0]
nf = fixations.shape[0]
# Distance array
dist2fix = np.zeros((nt, nf))
# Fill distance array (nt x nfix)
for (fix_i, fix) in enumerate(fixations):
dx, dy = X[:,0] - fix[0], X[:,1] - fix[1]
dist2fix[:, fix_i] = np.sqrt(dx**2 + dy**2)
# Find index of minimum distance fixation for each timepoint
return np.argmin(dist2fix, axis=1)
def CalibrationModel(fixations, targets):
'''
Construct biquadratic transform from video space to gaze space
BIQUADRATIC CALIBRATION MODEL
----
We need to solve the matrix equation C * R = R0 where
C = biquadratic transform matrix (2 x 6) (rank 2, full row rank)
R = fixation matrix (6 x n) in video space (rank 6)
R0 = fixation targets (2 x n) in gaze space (rank 2)
R has rows xx, xy, yy, x, y, 1
Arguments
----
fixations : n x 2 float array
Fixation coordinates in video space. n >= 6
targets : n x 2 float array
Fixation targets in normalized gazed space
Returns
----
C : 2 x 6 float array
Biquadratic video-gaze post-multiply transform matrix
'''
# Init biquadratic coefficient array
C = np.zeros((2,6))
# Need at least 6 points for biquadratic mapping
if fixations.shape[0] < 6:
print('Too few fixations for biquadratic video to gaze mapping')
return C
# Create fixation biquadratic matrix, R
R = MakeR(fixations)
# R0 is the transposed target coordinate array (2 x n)
R0 = targets.transpose()
# Compute C by pseudoinverse of R (R+)
# C.R = R0
# C.R.R+ = R0.R+ = C
Rplus = np.linalg.pinv(R)
C = R0.dot(Rplus)
# Check that C maps correctly
# print(C.dot(R).transpose())
# print(R0.transpose())
return C
def MakeR(points):
# Extract coordinates from n x 2 points matrix
x, y = points[:,0], points[:,1]
# Additional binomial coordinates
xx = x * x
yy = y * y
xy = x * y;
# Construct R (n x 6)
R = np.array((xx, xy, yy, x, y, np.ones_like(x)))
return R
def HeatMap(x, y, xlims, ylims, sigma=1.0):
'''
Convert pupil center timeseries to 2D heatmap
'''
# Eliminate NaNs in x, y (from blinks)
x = x[np.isfinite(x)]
y = y[np.isfinite(y)]
# Parse out limits
xmin, xmax = xlims
ymin, ymax = ylims
#---
# NOTE: heatmap dimensions are y (1st) then x (2nd)
# corresponding to rows then columns.
# All coordinate orderings are adjusted accordingly
#---
# Composite histogram axis ranges
# Make bin count different for x and y for debugging
# *** Note y/row, x/col ordering
hbins = [np.linspace(ymin, ymax, 64), np.linspace(xmin, xmax, 65)]
# Construct histogram
# *** Note y/row, x/col ordering
hmap, yedges, xedges = np.histogram2d(y, x, bins=hbins)
# Gaussian blur
if sigma > 0:
hmap = cv2.GaussianBlur(hmap, (0,0), sigma, sigma)
return hmap, xedges, yedges
def ApplyCalibration(ss_dir, C, central_fix, cfg):
'''
Apply calibration transform to gaze pupil center timeseries
- apply motion correction if requested (highpass or known fixations)
- Save calibrated gaze to text file in results directory
Arguments
----
Returns
----
'''
print(' Calibrating pupilometry timeseries')
# Uncalibrated gaze pupilometry file
gaze_uncal_csv = os.path.join(ss_dir,'results','gaze_pupils.csv')
# Known central fixations file
fixations_txt = os.path.join(ss_dir,'videos','fixations.txt')
if not os.path.isfile(gaze_uncal_csv):
print('* Uncalibrated gaze pupilometry not found - returning')
return False
# Read raw pupilometry data
p = engine.ReadPupilometry(gaze_uncal_csv)
# Extract useful timeseries
t = p[:,0] # Video soft timestamp
x = p[:,2] # Pupil x
y = p[:,3] # Pupil y
# Retrospective motion correction - only use when consistent glint is unavailable
motioncorr = cfg.get('ARTIFACTS','motioncorr')
mocokernel = cfg.getint('ARTIFACTS','mocokernel')
if motioncorr == 'knownfixations':
print(' Motion correction using known fixations')
print(' Central fixation at (%0.3f, %0.3f)' % (central_fix[0], central_fix[1]))
x, y, bx, by = moco.KnownFixations(t, x, y, fixations_txt, central_fix)
elif motioncorr == 'highpass':
print(' Motion correction by high pass filtering (%d sample kernel)' % mocokernel)
print(' Central fixation at (%0.3f, %0.3f)' % (central_fix[0], central_fix[1]))
x, y, bx, by = moco.HighPassFilter(t, x, y, mocokernel, central_fix)
else:
print('* Unknown motion correction requested (%s) - skipping' % (motioncorr))
# Return dummy x and y baseline estimates
bx, by = np.zeros_like(x), np.zeros_like(y)
# Additional binomial coordinates
xx = x * x
yy = y * y
xy = x * y;
# Construct R
R = np.array((xx, xy, yy, x, y, np.ones_like(x)))
# Apply calibration transform to pupil-glint vector timeseries
# (2 x n) = (2 x 6) x (6 x n)
gaze = C.dot(R)
# Write calibrated gaze to CSV file in results directory
gaze_csv = os.path.join(ss_dir,'results','gaze_calibrated.csv')
WriteGaze(gaze_csv, t, gaze[0,:], gaze[1,:], bx, by)
return True
def CentralFixation(fixations, targets):
'''
Find video coordinate corresponding to gaze fixation at (0.5, 0.5)
'''
idx = -1
central_fix = np.array([np.NaN, np.NaN])
for ii in range(targets.shape[0]):
if targets[ii,0] == 0.5 and targets[ii,1] == 0.5:
idx = ii
central_fix = fixations[idx,:]
if idx < 0:
print('* Central fixation target not found')
central_fix = np.array([np.NaN, np.NaN])
return central_fix
def WriteGaze(gaze_csv, t, gaze_x, gaze_y, bline_x, bline_y):
'''
Write calibrated gaze to CSV file
'''
# Open calibrated gaze CSV file to write
try:
gaze_stream = open(gaze_csv, 'w')
except:
print('* Problem opening gaze CSV file to write - skipping')
return False
'''
Write gaze lines to file
Timeseries in columns. Column order is:
0 : Time (s)
1 : Calibrated gaze x
2 : Calibrated gaze y
3 : Baseline x
4 : Baseline y
'''
for (tc,tt) in enumerate(t):
gaze_stream.write('%0.3f,%0.3f,%0.3f,%0.3f,%0.3f\n' %
(tt, gaze_x[tc], gaze_y[tc], bline_x[tc], bline_y[tc]))
# Close gaze CSV file
gaze_stream.close()
return True
def ReadGaze(gaze_csv):
'''
Read calibrated gaze timeseries from CSV file
'''
# Read time series in rows
gt = np.genfromtxt(gaze_csv, delimiter=',')
# Parse out array
t, gaze_x, gaze_y = gt[:,0], gt[:,1], gt[:,2]
return t, gaze_x, gaze_y
def PlotCalibration(res_dir, hmap, xedges, yedges, fixations):
'''
Plot the calibration heatmap and temporally sorted fixation labels
'''
# Create a new figure
fig = plt.figure(figsize = (6,6))
# Plot spatial heatmap with fixation centroids
plt.imshow(hmap, interpolation='nearest', aspect='equal',
extent=[xedges[0], xedges[-1], yedges[-1], yedges[0]])
# Fixation coordinate vectors
fx, fy = fixations[:,0], fixations[:,1]
# Overlay fixation centroids with temporal order labels
plt.scatter(fx, fy, c='w', s=40)
alignment = {'horizontalalignment':'center', 'verticalalignment':'center'}
for fc in np.arange(0,fx.shape[0]):
plt.text(fx[fc], fy[fc], '%d' % fc, backgroundcolor='w', color='k', **alignment)
# Save figure without displaying
plt.savefig(os.path.join(res_dir, 'cal_fix_space.png'), dpi=150, bbox_inches='tight')
# Close figure without showing it
plt.close(fig)
def WriteCalibration(ss_res_dir, fixations, C, central_fix):
'''
Write calibration matrix and fixations to CSV files in results subdirectory
'''
# Write calibration matrix to text file in results subdir
calmat_csv = os.path.join(ss_res_dir, 'calibration_matrix.csv')
# Write calibration matrix to CSV file
try:
np.savetxt(calmat_csv, C, delimiter=",")
except:
print('* Problem saving calibration matrix to CSV file - skipping')
return False
# Write calibration fixations in video space to results subdir
calfix_csv = os.path.join(ss_res_dir, 'calibration_fixations.csv')
# Write calibration fixations to CSV file
try:
np.savetxt(calfix_csv, fixations, delimiter=",")
except:
print('* Problem saving calibration fixations to CSV file - skipping')
return False
# Write central fixation in video space to results subdir
ctrfix_csv = os.path.join(ss_res_dir, 'central_fixation.csv')
# Write calibration fixations to CSV file
try:
np.savetxt(ctrfix_csv, central_fix, delimiter=",")
except:
print('* Problem saving central fixation to CSV file - skipping')
return False
return True
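# Illustrative sketch (not part of the original module): round-tripping the
# biquadratic model on synthetic data. The 3 x 3 target grid and the affine
# video-space coordinates are assumptions purely for demonstration.
if __name__ == '__main__':
    gx, gy = np.meshgrid(np.linspace(0, 1, 3), np.linspace(0, 1, 3))
    demo_targets = np.column_stack((gx.ravel(), gy.ravel()))   # n x 2, n = 9
    demo_fixations = demo_targets * 200.0 + 50.0               # fake video-space coords
    C_demo = CalibrationModel(demo_fixations, demo_targets)
    R_demo = MakeR(demo_fixations)
    # The recovered mapping should reproduce the targets almost exactly
    print(np.allclose(C_demo.dot(R_demo).T, demo_targets))     # -> True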
| |
"""IPython extension to reload modules before executing user code.
``autoreload`` reloads modules automatically before entering the execution of
code typed at the IPython prompt.
This makes for example the following workflow possible:
.. sourcecode:: ipython
In [1]: %load_ext autoreload
In [2]: %autoreload 2
In [3]: from foo import some_function
In [4]: some_function()
Out[4]: 42
In [5]: # open foo.py in an editor and change some_function to return 43
In [6]: some_function()
Out[6]: 43
The module was reloaded without reloading it explicitly, and the object
imported with ``from foo import ...`` was also updated.
Usage
=====
The following magic commands are provided:
``%autoreload``
Reload all modules (except those excluded by ``%aimport``)
automatically now.
``%autoreload 0``
Disable automatic reloading.
``%autoreload 1``
Reload all modules imported with ``%aimport`` every time before
executing the Python code typed.
``%autoreload 2``
Reload all modules (except those excluded by ``%aimport``) every
time before executing the Python code typed.
``%aimport``
List modules which are to be automatically imported or not to be imported.
``%aimport foo``
Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
``%aimport foo, bar``
Import modules 'foo', 'bar' and mark them to be autoreloaded for ``%autoreload 1``
``%aimport -foo``
Mark module 'foo' to not be autoreloaded.
Caveats
=======
Reloading Python modules in a reliable way is in general difficult,
and unexpected things may occur. ``%autoreload`` tries to work around
common pitfalls by replacing function code objects and parts of
classes previously in the module with new versions. This makes the
following things work:
- Functions and classes imported via 'from xxx import foo' are upgraded
to new versions when 'xxx' is reloaded.
- Methods and properties of classes are upgraded on reload, so that
calling 'c.foo()' on an object 'c' created before the reload causes
the new code for 'foo' to be executed.
Some of the known remaining caveats are:
- Replacing code objects does not always succeed: changing a @property
in a class to an ordinary method or a method to a member variable
can cause problems (but in old objects only).
- Functions that are removed (eg. via monkey-patching) from a module
before it is reloaded are not upgraded.
- C extension modules cannot be reloaded, and so cannot be autoreloaded.
"""
skip_doctest = True
#-----------------------------------------------------------------------------
# Copyright (C) 2000 Thomas Heller
# Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#
# This IPython module is written by Pauli Virtanen, based on the autoreload
# code by Thomas Heller.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import traceback
import types
import weakref
from importlib import import_module
from IPython.utils.py3compat import PY3
from imp import reload
from IPython.utils import openpy
#------------------------------------------------------------------------------
# Autoreload functionality
#------------------------------------------------------------------------------
class ModuleReloader(object):
enabled = False
"""Whether this reloader is enabled"""
check_all = True
"""Autoreload all modules, not just those listed in 'modules'"""
def __init__(self):
# Modules that failed to reload: {module: mtime-on-failed-reload, ...}
self.failed = {}
# Modules specially marked as autoreloadable.
self.modules = {}
# Modules specially marked as not autoreloadable.
self.skip_modules = {}
# (module-name, name) -> weakref, for replacing old code objects
self.old_objects = {}
# Module modification timestamps
self.modules_mtimes = {}
# Cache module modification times
self.check(check_all=True, do_reload=False)
def mark_module_skipped(self, module_name):
"""Skip reloading the named module in the future"""
try:
del self.modules[module_name]
except KeyError:
pass
self.skip_modules[module_name] = True
def mark_module_reloadable(self, module_name):
"""Reload the named module in the future (if it is imported)"""
try:
del self.skip_modules[module_name]
except KeyError:
pass
self.modules[module_name] = True
def aimport_module(self, module_name):
"""Import a module, and mark it reloadable
Returns
-------
top_module : module
The imported module if it is top-level, or the top-level
top_name : module
Name of top_module
"""
self.mark_module_reloadable(module_name)
import_module(module_name)
top_name = module_name.split('.')[0]
top_module = sys.modules[top_name]
return top_module, top_name
def filename_and_mtime(self, module):
if not hasattr(module, '__file__') or module.__file__ is None:
return None, None
if getattr(module, '__name__', None) in ['__mp_main__', '__main__']:
# we cannot reload(__main__) or reload(__mp_main__)
return None, None
filename = module.__file__
path, ext = os.path.splitext(filename)
if ext.lower() == '.py':
py_filename = filename
else:
try:
py_filename = openpy.source_from_cache(filename)
except ValueError:
return None, None
try:
pymtime = os.stat(py_filename).st_mtime
except OSError:
return None, None
return py_filename, pymtime
def check(self, check_all=False, do_reload=True):
"""Check whether some modules need to be reloaded."""
if not self.enabled and not check_all:
return
if check_all or self.check_all:
modules = list(sys.modules.keys())
else:
modules = list(self.modules.keys())
for modname in modules:
m = sys.modules.get(modname, None)
if modname in self.skip_modules:
continue
py_filename, pymtime = self.filename_and_mtime(m)
if py_filename is None:
continue
try:
if pymtime <= self.modules_mtimes[modname]:
continue
except KeyError:
self.modules_mtimes[modname] = pymtime
continue
else:
if self.failed.get(py_filename, None) == pymtime:
continue
self.modules_mtimes[modname] = pymtime
# If we've reached this point, we should try to reload the module
if do_reload:
try:
superreload(m, reload, self.old_objects)
if py_filename in self.failed:
del self.failed[py_filename]
except:
print("[autoreload of %s failed: %s]" % (
modname, traceback.format_exc(10)), file=sys.stderr)
self.failed[py_filename] = pymtime
#------------------------------------------------------------------------------
# superreload
#------------------------------------------------------------------------------
func_attrs = ['__code__', '__defaults__', '__doc__',
'__closure__', '__globals__', '__dict__']
def update_function(old, new):
"""Upgrade the code object of a function"""
for name in func_attrs:
try:
setattr(old, name, getattr(new, name))
except (AttributeError, TypeError):
pass
def update_class(old, new):
"""Replace stuff in the __dict__ of a class, and upgrade
method code objects"""
for key in list(old.__dict__.keys()):
old_obj = getattr(old, key)
try:
new_obj = getattr(new, key)
if old_obj == new_obj:
continue
except AttributeError:
# obsolete attribute: remove it
try:
delattr(old, key)
except (AttributeError, TypeError):
pass
continue
if update_generic(old_obj, new_obj): continue
try:
setattr(old, key, getattr(new, key))
except (AttributeError, TypeError):
pass # skip non-writable attributes
def update_property(old, new):
"""Replace get/set/del functions of a property"""
update_generic(old.fdel, new.fdel)
update_generic(old.fget, new.fget)
update_generic(old.fset, new.fset)
def isinstance2(a, b, typ):
return isinstance(a, typ) and isinstance(b, typ)
UPDATE_RULES = [
(lambda a, b: isinstance2(a, b, type),
update_class),
(lambda a, b: isinstance2(a, b, types.FunctionType),
update_function),
(lambda a, b: isinstance2(a, b, property),
update_property),
]
UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.MethodType),
lambda a, b: update_function(a.__func__, b.__func__)),
])
def update_generic(a, b):
for type_check, update in UPDATE_RULES:
if type_check(a, b):
update(a, b)
return True
return False
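# Illustrative sketch (not part of the extension): update_generic patches an
# old function object in place, so references created before a reload see the
# new behaviour. The helper below exists only for this example and is not called.
def _example_update_demo():
    def old():
        return 1
    def new():
        return 2
    update_generic(old, new)
    assert old() == 2  # old now runs the new code object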
class StrongRef(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
return self.obj
def superreload(module, reload=reload, old_objects={}):
"""Enhanced version of the builtin reload function.
superreload remembers objects previously in the module, and
- upgrades the class dictionary of every old class in the module
- upgrades the code object of every old function and method
- clears the module's namespace before reloading
"""
# collect old objects in the module
for name, obj in list(module.__dict__.items()):
if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
continue
key = (module.__name__, name)
try:
old_objects.setdefault(key, []).append(weakref.ref(obj))
except TypeError:
pass
# reload module
try:
# clear namespace first from old cruft
old_dict = module.__dict__.copy()
old_name = module.__name__
module.__dict__.clear()
module.__dict__['__name__'] = old_name
module.__dict__['__loader__'] = old_dict['__loader__']
except (TypeError, AttributeError, KeyError):
pass
try:
module = reload(module)
except:
# restore module dictionary on failed reload
module.__dict__.update(old_dict)
raise
# iterate over all objects and update functions & classes
for name, new_obj in list(module.__dict__.items()):
key = (module.__name__, name)
if key not in old_objects: continue
new_refs = []
for old_ref in old_objects[key]:
old_obj = old_ref()
if old_obj is None: continue
new_refs.append(old_ref)
update_generic(old_obj, new_obj)
if new_refs:
old_objects[key] = new_refs
else:
del old_objects[key]
return module
#------------------------------------------------------------------------------
# IPython connectivity
#------------------------------------------------------------------------------
from IPython.core.magic import Magics, magics_class, line_magic
@magics_class
class AutoreloadMagics(Magics):
def __init__(self, *a, **kw):
super(AutoreloadMagics, self).__init__(*a, **kw)
self._reloader = ModuleReloader()
self._reloader.check_all = False
self.loaded_modules = set(sys.modules)
@line_magic
def autoreload(self, parameter_s=''):
r"""%autoreload => Reload modules automatically
%autoreload
Reload all modules (except those excluded by %aimport) automatically
now.
%autoreload 0
Disable automatic reloading.
%autoreload 1
Reload all modules imported with %aimport every time before executing
the Python code typed.
%autoreload 2
Reload all modules (except those excluded by %aimport) every time
before executing the Python code typed.
Reloading Python modules in a reliable way is in general
difficult, and unexpected things may occur. %autoreload tries to
work around common pitfalls by replacing function code objects and
parts of classes previously in the module with new versions. This
makes the following things work:
- Functions and classes imported via 'from xxx import foo' are upgraded
to new versions when 'xxx' is reloaded.
- Methods and properties of classes are upgraded on reload, so that
calling 'c.foo()' on an object 'c' created before the reload causes
the new code for 'foo' to be executed.
Some of the known remaining caveats are:
- Replacing code objects does not always succeed: changing a @property
in a class to an ordinary method or a method to a member variable
can cause problems (but in old objects only).
- Functions that are removed (eg. via monkey-patching) from a module
before it is reloaded are not upgraded.
- C extension modules cannot be reloaded, and so cannot be
autoreloaded.
"""
if parameter_s == '':
self._reloader.check(True)
elif parameter_s == '0':
self._reloader.enabled = False
elif parameter_s == '1':
self._reloader.check_all = False
self._reloader.enabled = True
elif parameter_s == '2':
self._reloader.check_all = True
self._reloader.enabled = True
@line_magic
def aimport(self, parameter_s='', stream=None):
"""%aimport => Import modules for automatic reloading.
%aimport
List modules to automatically import and not to import.
%aimport foo
Import module 'foo' and mark it to be autoreloaded for %autoreload 1
%aimport foo, bar
Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1
%aimport -foo
Mark module 'foo' to not be autoreloaded for %autoreload 1
"""
modname = parameter_s
if not modname:
to_reload = sorted(self._reloader.modules.keys())
to_skip = sorted(self._reloader.skip_modules.keys())
if stream is None:
stream = sys.stdout
if self._reloader.check_all:
stream.write("Modules to reload:\nall-except-skipped\n")
else:
stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload))
stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip))
elif modname.startswith('-'):
modname = modname[1:]
self._reloader.mark_module_skipped(modname)
else:
for _module in ([_.strip() for _ in modname.split(',')]):
top_module, top_name = self._reloader.aimport_module(_module)
# Inject module to user namespace
self.shell.push({top_name: top_module})
def pre_run_cell(self):
if self._reloader.enabled:
try:
self._reloader.check()
except:
pass
def post_execute_hook(self):
"""Cache the modification times of any modules imported in this execution
"""
newly_loaded_modules = set(sys.modules) - self.loaded_modules
for modname in newly_loaded_modules:
_, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
if pymtime is not None:
self._reloader.modules_mtimes[modname] = pymtime
self.loaded_modules.update(newly_loaded_modules)
def load_ipython_extension(ip):
"""Load the extension in IPython."""
auto_reload = AutoreloadMagics(ip)
ip.register_magics(auto_reload)
ip.events.register('pre_run_cell', auto_reload.pre_run_cell)
ip.events.register('post_execute', auto_reload.post_execute_hook)
| |
from PyQt4 import QtGui, QtCore
import time
class StoryTelling(QtGui.QWidget):
storytelling_event = QtCore.pyqtSignal(list)
def __init__(self):
super(StoryTelling, self).__init__()
self.isRealTime = True
##GUI
self.layoutMain = QtGui.QHBoxLayout()
self.groupMenu = QtGui.QGroupBox("StoryTelling")
self.groupMenu.setStyleSheet("background-color:red")
self.layoutMenu = QtGui.QVBoxLayout()
self.checkBox_realTime = QtGui.QCheckBox("realTime")
self.checkBox_realTime.setChecked(QtCore.Qt.Checked)
self.label_warning = QtGui.QLabel("Msg sent")
self.label_warning.setStyleSheet("background-color:red")
self.button_next = QtGui.QPushButton("Next")
self.button_clock = QtGui.QPushButton("Clock")
self.button_call = QtGui.QPushButton("Call")
self.button_save = QtGui.QPushButton("Save")
self.lcd = QtGui.QLCDNumber(5)
self.lcd.setSegmentStyle(QtGui.QLCDNumber.Flat)
#ListView
self.listView = QtGui.QListView()
self.model = QtGui.QStandardItemModel(self.listView)
##Add to layout
self.layoutMenu.addWidget(self.checkBox_realTime)
self.layoutMenu.addWidget(self.lcd)
self.layoutMenu.addWidget(self.button_next)
self.layoutMenu.addWidget(self.button_clock)
self.layoutMenu.addWidget(self.button_call)
self.layoutMenu.addWidget(self.button_save)
self.groupMenu.setLayout(self.layoutMenu)
self.layoutMain.addWidget(self.groupMenu)
self.layoutMain.addWidget(self.listView)
self.setLayout(self.layoutMain)
### Settings model and list of string
self.list_string = [
'Cookie dough',
'Hummus',
'Spaghetti',
'Dal makhani',
'Chocolate whipped cream'
]
self.open_file()
self.listView.setModel(self.model)
## Connect
self.model.itemChanged.connect(self.line_changed)
self.connect(self.listView, QtCore.SIGNAL("activated(QModelIndex)"), self.callSelection )
self.button_save.clicked.connect(self.save_file)
self.button_call.clicked.connect(self.callSelection_button)
self.button_next.clicked.connect(self.next_index)
self.button_clock.clicked.connect(self.reset_clock)
QtCore.QObject.connect( self.lcd , QtCore.SIGNAL("mousePressEvent()") , self.reset_clock)
## Warning information when message pressed
self.isMessageSent = False
self.imgBlank = "images/blank.png"
self.imgSent = "images/sent.png"
#clock manager to know where we are during the show
self.time_start = time.time()
# Select the first line of the QListView
first_item = self.model.item(0)
first_modelIndex = self.model.indexFromItem(first_item)
self.listView.setCurrentIndex(first_modelIndex)
def fill_model(self):
self.model.beginResetModel()
for line in self.list_string:
# Create an item with a caption
item = QtGui.QStandardItem(line)
# Add a checkbox to it
item.setCheckable(False)
# Add the item to the model
self.model.appendRow(item)
def save_file(self):
file = open('file/conduite.txt', 'w')
print "save file"
print self.model.rowCount()
for a in range(self.model.rowCount()):
word = str(self.model.item(a).text())
file.write(word+"\n")
file.close()
self.open_file()
def open_file(self):
file = open('file/conduite.txt', 'r')
self.list_string = []
self.model.clear()
for line in file:
self.list_string.append(line[:len(line)-1])
self.fill_model()
print self.list_string
def line_changed(self,item):
print "line_changed"
def warning_message_status(self):
if self.isMessageSent:
self.label_warning.setPixmap(QtGui.QPixmap(self.imgSent))
self.isMessageSent = False
else :
self.label_warning.setPixmap(QtGui.QPixmap(self.imgBlank))
def next_index(self):
selection_list = self.listView.selectedIndexes()
row = 0
if len(selection_list) == 1:
indexItem = selection_list[0]
count = 1
indexItemNext = indexItem.sibling(indexItem.row() + count, 0)
# continue to the next row until the selected line is NOT a comment
while self.is_selectedRow_comment(indexItemNext):
count += 1
indexItemNext = indexItem.sibling(indexItem.row() + count, 0)
self.listView.setCurrentIndex(indexItemNext)
else :
print "erreur : seleciton multiple"
def prev_index(self):
selection_list = self.listView.selectedIndexes()
row = 1
if len(selection_list) == 1:
indexItem = selection_list[0]
count = 1
indexItemNext = indexItem.sibling(indexItem.row() - count, 0)
while self.is_selectedRow_comment(indexItemNext) or indexItemNext.row() == 0:
count += 1
indexItemNext = indexItem.sibling(indexItem.row() - count, 0)
self.listView.setCurrentIndex(indexItemNext)
else :
print "erreur : seleciton multiple"
# call a script message from the current selection, from the "enter" key or the "call" button
def is_selectedRow_comment(self, index):
line = str(index.data().toString())
list_of_word = line.split()
if line[:1] == '*' :
return True
else :
return False
def callSelection(self, index):
# we need a QModelIndex
line = str(index.data().toString())
list_of_word = line.split()
res = []
first = []
#Check if the line is a comment or not
if line[:1] == '*' :
print "no comment"
#select the next line
self.next_index()
#if not, split the line , convert word to float if needed
else :
while len(list_of_word)<3:
list_of_word.append("0")
for word in list_of_word:
if str(word[:1]).isdigit() or str(word[:1]) == "-":
first.append(float(word))
else:
first.append(word)
res.append(first)
print res
#send to nao_manager
self.write_msg(res)
#send warning icon
self.isMessageSent = True
#select the next line
self.next_index()
# connected to "call" button, and resend to self.callSelection function
def callSelection_button(self):
#selection_list is a QModelIndexList
selection_list = self.listView.selectedIndexes()
row = 0
if len(selection_list) == 1:
#indexItem is a QModelIndex
indexItem = selection_list[0]
self.callSelection(indexItem)
def transmit_msg(self,l):
res = []
# First, check whether the incoming message controls the storytelling itself (next, call, prev)
for msg in l:
if(len(msg)==4):
name = msg[0]
arg1 = msg[2]
if name == "STORY":
if arg1 == "CALL":
self.callSelection_button()
elif arg1 == "NEXT":
self.next_index()
elif arg1 == "PREV":
self.prev_index()
# Otherwise, control Nao in real time using the controller, passing through the storytelling object
if self.checkBox_realTime.isChecked():
self.storytelling_event.emit(l)
def write_msg(self,l):
self.storytelling_event.emit(l)
def update_clock(self):
mytime = time.time() - self.time_start
minutes = int(mytime) // 60
text = QtCore.QString("%d:%02d" % (minutes, int(mytime) - minutes * 60))
self.lcd.display(text)
def reset_clock(self):
self.time_start = time.time()
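# Illustrative sketch (not part of the original file): a minimal way to run the
# widget standalone. It assumes a 'file/conduite.txt' script file exists, since
# open_file() reads it during construction.
#
#     import sys
#     app = QtGui.QApplication(sys.argv)
#     widget = StoryTelling()
#     widget.show()
#     sys.exit(app.exec_())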
| |
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .api import MatrixHttpApi
from .errors import MatrixRequestError, MatrixUnexpectedResponse
from .room import Room
from .user import User
try:
from .crypto.olm_device import OlmDevice
ENCRYPTION_SUPPORT = True
except ImportError:
ENCRYPTION_SUPPORT = False
from threading import Thread
from time import sleep
from uuid import uuid4
from warnings import warn
import logging
import sys
logger = logging.getLogger(__name__)
# Cache constants used when instantiating Matrix Client to specify level of caching
class CACHE(int):
pass
CACHE.NONE = CACHE(-1)
CACHE.SOME = CACHE(0)
CACHE.ALL = CACHE(1)
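# Note (added for illustration, not part of the original module): CACHE
# subclasses int, so the three levels order and compare like plain integers,
# e.g. CACHE.NONE < CACHE.SOME < CACHE.ALL, and isinstance(CACHE.ALL, int) is True.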
# TODO: rather than having CACHE.NONE as kwarg to MatrixClient, there should be a separate
# LightweightMatrixClient that only implements global listeners and doesn't hook into
# User, Room, etc. classes at all.
class MatrixClient(object):
"""
The client API for Matrix. For the raw HTTP calls, see MatrixHttpApi.
Args:
base_url (str): The url of the HS preceding /_matrix.
e.g. https://localhost:8008
token (Optional[str]): If you have an access token
supply it here.
user_id (Optional[str]): Deprecated. The user ID is now requested from the server.
valid_cert_check (bool): Check the homeserver's
certificate on connections?
cache_level (CACHE): One of CACHE.NONE, CACHE.SOME, or
CACHE.ALL (defined in module namespace).
encryption (bool): Optional. Whether or not to enable end-to-end encryption
support.
encryption_conf (dict): Optional. Configuration parameters for encryption.
Refer to :func:`~matrix_client.crypto.olm_device.OlmDevice` for supported
options, since it will be passed to this class.
Returns:
`MatrixClient`
Raises:
`MatrixRequestError`, `ValueError`
Examples:
Create a new user and send a message::
client = MatrixClient("https://matrix.org")
token = client.register_with_password(username="foobar",
password="monkey")
room = client.create_room("myroom")
room.send_image(file_like_object)
Send a message with an already logged in user::
client = MatrixClient("https://matrix.org", token="foobar",
user_id="@foobar:matrix.org")
client.add_listener(func) # NB: event stream callback
client.rooms[0].add_listener(func) # NB: callbacks just for this room.
room = client.join_room("#matrix:matrix.org")
response = room.send_text("Hello!")
response = room.kick("@bob:matrix.org")
Incoming event callbacks (scopes)::
def user_callback(user, incoming_event):
pass
def room_callback(room, incoming_event):
pass
def global_callback(incoming_event):
pass
Attributes:
users (dict): A map from user ID to :class:`.User` object.
It is populated automatically while tracking the membership in rooms, and
shouldn't be modified directly.
A :class:`.User` object in this dict is shared between all :class:`.Room`
objects where the corresponding user is joined.
"""
def __init__(self, base_url, token=None, user_id=None,
valid_cert_check=True, sync_filter_limit=20,
cache_level=CACHE.ALL, encryption=False, encryption_conf=None):
if user_id:
warn(
"user_id is deprecated. "
"Now it is requested from the server.", DeprecationWarning
)
if encryption and not ENCRYPTION_SUPPORT:
raise ValueError("Failed to enable encryption. Please make sure the olm "
"library is available.")
self.api = MatrixHttpApi(base_url, token)
self.api.validate_certificate(valid_cert_check)
self.listeners = []
self.presence_listeners = {}
self.invite_listeners = []
self.left_listeners = []
self.ephemeral_listeners = []
self.device_id = None
self._encryption = encryption
self.encryption_conf = encryption_conf or {}
self.olm_device = None
if isinstance(cache_level, CACHE):
self._cache_level = cache_level
else:
self._cache_level = CACHE.ALL
raise ValueError(
"cache_level must be one of CACHE.NONE, CACHE.SOME, CACHE.ALL"
)
self.sync_token = None
self.sync_filter = '{ "room": { "timeline" : { "limit" : %i } } }' \
% sync_filter_limit
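        # With the default sync_filter_limit=20 this expands to
        # '{ "room": { "timeline" : { "limit" : 20 } } }'.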
self.sync_thread = None
self.should_listen = False
""" Time to wait before attempting a /sync request after failing."""
self.bad_sync_timeout_limit = 60 * 60
self.rooms = {
# room_id: Room
}
self.users = {
# user_id: User
}
if token:
response = self.api.whoami()
self.user_id = response["user_id"]
self._sync()
def get_sync_token(self):
warn("get_sync_token is deprecated. Directly access MatrixClient.sync_token.",
DeprecationWarning)
return self.sync_token
def set_sync_token(self, token):
warn("set_sync_token is deprecated. Directly access MatrixClient.sync_token.",
DeprecationWarning)
self.sync_token = token
def set_user_id(self, user_id):
warn("set_user_id is deprecated. Directly access MatrixClient.user_id.",
DeprecationWarning)
self.user_id = user_id
# TODO: combine register methods into single register method controlled by kwargs
def register_as_guest(self):
""" Register a guest account on this HS.
Note: HS must have guest registration enabled.
Returns:
str: Access Token
Raises:
MatrixRequestError
"""
response = self.api.register(auth_body=None, kind='guest')
return self._post_registration(response)
def register_with_password(self, username, password):
""" Register for a new account on this HS.
Args:
username (str): Account username
password (str): Account password
Returns:
str: Access Token
Raises:
MatrixRequestError
"""
response = self.api.register(
auth_body={"type": "m.login.dummy"},
kind='user',
username=username,
password=password,
)
return self._post_registration(response)
def _post_registration(self, response):
self.user_id = response["user_id"]
self.token = response["access_token"]
self.hs = response["home_server"]
self.api.token = self.token
self._sync()
return self.token
def login_with_password_no_sync(self, username, password):
"""Deprecated. Use ``login`` with ``sync=False``.
Login to the homeserver.
Args:
username (str): Account username
password (str): Account password
Returns:
str: Access token
Raises:
MatrixRequestError
"""
warn("login_with_password_no_sync is deprecated. Use login with sync=False.",
DeprecationWarning)
return self.login(username, password, sync=False)
def login_with_password(self, username, password, limit=10):
"""Deprecated. Use ``login`` with ``sync=True``.
Login to the homeserver.
Args:
username (str): Account username
password (str): Account password
limit (int): Deprecated. How many messages to return when syncing.
This will be replaced by a filter API in a later release.
Returns:
str: Access token
Raises:
MatrixRequestError
"""
warn("login_with_password is deprecated. Use login with sync=True.",
DeprecationWarning)
return self.login(username, password, limit, sync=True)
def login(self, username, password, limit=10, sync=True, device_id=None):
"""Login to the homeserver.
Args:
username (str): Account username
password (str): Account password
limit (int): Deprecated. How many messages to return when syncing.
This will be replaced by a filter API in a later release.
sync (bool): Optional. Whether to initiate a /sync request after logging in.
device_id (str): Optional. ID of the client device. The server will
auto-generate a device_id if this is not specified.
Returns:
str: Access token
Raises:
MatrixRequestError
"""
response = self.api.login(
"m.login.password", user=username, password=password, device_id=device_id
)
self.user_id = response["user_id"]
self.token = response["access_token"]
self.hs = response["home_server"]
self.api.token = self.token
self.device_id = response["device_id"]
if self._encryption:
self.olm_device = OlmDevice(
self.api, self.user_id, self.device_id, **self.encryption_conf)
self.olm_device.upload_identity_keys()
self.olm_device.upload_one_time_keys()
if sync:
""" Limit Filter """
self.sync_filter = '{ "room": { "timeline" : { "limit" : %i } } }' % limit
self._sync()
return self.token
def logout(self):
""" Logout from the homeserver.
"""
self.stop_listener_thread()
self.api.logout()
# TODO: move room creation/joining to User class for future application service usage
# NOTE: we may want to leave thin wrappers here for convenience
def create_room(self, alias=None, is_public=False, invitees=None):
""" Create a new room on the homeserver.
Args:
alias (str): The canonical_alias of the room.
is_public (bool): The public/private visibility of the room.
invitees (str[]): A set of user ids to invite into the room.
Returns:
Room
Raises:
MatrixRequestError
"""
response = self.api.create_room(alias=alias,
is_public=is_public,
invitees=invitees)
return self._mkroom(response["room_id"])
def join_room(self, room_id_or_alias):
""" Join a room.
Args:
room_id_or_alias (str): Room ID or an alias.
Returns:
Room
Raises:
MatrixRequestError
"""
response = self.api.join_room(room_id_or_alias)
room_id = (
response["room_id"] if "room_id" in response else room_id_or_alias
)
return self._mkroom(room_id)
def get_rooms(self):
""" Deprecated. Return a dict of {room_id: Room objects} that the user has joined.
Returns:
Room{}: Rooms the user has joined.
"""
warn("get_rooms is deprecated. Directly access MatrixClient.rooms.",
DeprecationWarning)
return self.rooms
# TODO: create Listener class and push as much of this logic there as possible
# NOTE: listeners related to things in rooms should be attached to Room objects
def add_listener(self, callback, event_type=None):
""" Add a listener that will send a callback when the client recieves
an event.
Args:
callback (func(roomchunk)): Callback called when an event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
"""
listener_uid = uuid4()
# TODO: listeners should be stored in dict and accessed/deleted directly. Add
# convenience method such that MatrixClient.listeners.new(Listener(...)) performs
# MatrixClient.listeners[uuid4()] = Listener(...)
self.listeners.append(
{
'uid': listener_uid,
'callback': callback,
'event_type': event_type
}
)
return listener_uid
def remove_listener(self, uid):
""" Remove listener with given uid.
Args:
uuid.UUID: Unique id of the listener to remove.
"""
self.listeners[:] = (listener for listener in self.listeners
if listener['uid'] != uid)
def add_presence_listener(self, callback):
""" Add a presence listener that will send a callback when the client receives
a presence update.
Args:
callback (func(roomchunk)): Callback called when a presence update arrives.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
"""
listener_uid = uuid4()
self.presence_listeners[listener_uid] = callback
return listener_uid
def remove_presence_listener(self, uid):
""" Remove presence listener with given uid
Args:
uuid.UUID: Unique id of the listener to remove
"""
self.presence_listeners.pop(uid)
def add_ephemeral_listener(self, callback, event_type=None):
""" Add an ephemeral listener that will send a callback when the client recieves
an ephemeral event.
Args:
callback (func(roomchunk)): Callback called when an ephemeral event arrives.
event_type (str): The event_type to filter for.
Returns:
uuid.UUID: Unique id of the listener, can be used to identify the listener.
"""
listener_id = uuid4()
self.ephemeral_listeners.append(
{
'uid': listener_id,
'callback': callback,
'event_type': event_type
}
)
return listener_id
def remove_ephemeral_listener(self, uid):
""" Remove ephemeral listener with given uid.
Args:
uuid.UUID: Unique id of the listener to remove.
"""
self.ephemeral_listeners[:] = (listener for listener in self.ephemeral_listeners
if listener['uid'] != uid)
def add_invite_listener(self, callback):
""" Add a listener that will send a callback when the client receives
an invite.
Args:
callback (func(room_id, state)): Callback called when an invite arrives.
"""
self.invite_listeners.append(callback)
def add_leave_listener(self, callback):
""" Add a listener that will send a callback when the client has left a room.
Args:
callback (func(room_id, room)): Callback called when the client
has left a room.
"""
self.left_listeners.append(callback)
def listen_for_events(self, timeout_ms=30000):
"""
This function just calls _sync()
In a future version of this sdk, this function will be deprecated and
_sync method will be renamed sync with the intention of it being called
by downstream code.
Args:
timeout_ms (int): How long to poll the Home Server for before
retrying.
"""
# TODO: see docstring
self._sync(timeout_ms)
def listen_forever(self, timeout_ms=30000, exception_handler=None,
bad_sync_timeout=5):
""" Keep listening for events forever.
Args:
timeout_ms (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread.
bad_sync_timeout (int): Base time to wait after an error before
retrying. Will be increased according to exponential backoff.
"""
_bad_sync_timeout = bad_sync_timeout
self.should_listen = True
while (self.should_listen):
try:
self._sync(timeout_ms)
_bad_sync_timeout = bad_sync_timeout
# TODO: we should also handle MatrixHttpLibError for retry in case no response
except MatrixRequestError as e:
                logger.warning("A MatrixRequestError occurred during sync.")
if e.code >= 500:
                    logger.warning("Problem occurred server-side. Waiting %i seconds",
                                   _bad_sync_timeout)
                    sleep(_bad_sync_timeout)
                    _bad_sync_timeout = min(_bad_sync_timeout * 2,
                                            self.bad_sync_timeout_limit)
elif exception_handler is not None:
exception_handler(e)
else:
raise
except Exception as e:
logger.exception("Exception thrown during sync")
if exception_handler is not None:
exception_handler(e)
else:
raise
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
""" Start a listener thread to listen for events in the background.
Args:
            timeout_ms (int): How long to poll the Home Server for before
retrying.
exception_handler (func(exception)): Optional exception handler
function which can be used to handle exceptions in the caller
thread.
"""
try:
thread = Thread(target=self.listen_forever,
args=(timeout_ms, exception_handler))
thread.daemon = True
self.sync_thread = thread
self.should_listen = True
thread.start()
except RuntimeError:
e = sys.exc_info()[0]
logger.error("Error: unable to start thread. %s", str(e))
def stop_listener_thread(self):
""" Stop listener thread running in the background
"""
if self.sync_thread:
self.should_listen = False
self.sync_thread.join()
self.sync_thread = None
# TODO: move to User class. Consider creating lightweight Media class.
def upload(self, content, content_type, filename=None):
""" Upload content to the home server and recieve a MXC url.
Args:
content (bytes): The data of the content.
content_type (str): The mimetype of the content.
filename (str): Optional. Filename of the content.
Raises:
MatrixUnexpectedResponse: If the homeserver gave a strange response
MatrixRequestError: If the upload failed for some reason.
"""
try:
response = self.api.media_upload(content, content_type, filename)
if "content_uri" in response:
return response["content_uri"]
else:
raise MatrixUnexpectedResponse(
"The upload was successful, but content_uri wasn't found."
)
except MatrixRequestError as e:
raise MatrixRequestError(
code=e.code,
content="Upload failed: %s" % e
)
def _mkroom(self, room_id):
room = Room(self, room_id)
if self._encryption:
try:
event = self.api.get_state_event(room_id, "m.room.encryption")
if event["algorithm"] == "m.megolm.v1.aes-sha2":
room.encrypted = True
except MatrixRequestError as e:
if e.code != 404:
raise
self.rooms[room_id] = room
return self.rooms[room_id]
# TODO better handling of the blocking I/O caused by update_one_time_key_counts
def _sync(self, timeout_ms=30000):
response = self.api.sync(self.sync_token, timeout_ms, filter=self.sync_filter)
self.sync_token = response["next_batch"]
if 'presence' in response and 'events' in response['presence']:
for presence_update in response['presence']['events']:
for callback in self.presence_listeners.values():
callback(presence_update)
if self._encryption and 'device_one_time_keys_count' in response:
self.olm_device.update_one_time_key_counts(
response['device_one_time_keys_count'])
rooms = response.get("rooms", {})
if 'invite' in rooms:
for room_id, invite_room in rooms['invite'].items():
for listener in self.invite_listeners:
listener(room_id, invite_room['invite_state'])
if 'leave' in rooms:
for room_id, left_room in rooms['leave'].items():
for listener in self.left_listeners:
listener(room_id, left_room)
if room_id in self.rooms:
del self.rooms[room_id]
if 'join' in rooms:
for room_id, sync_room in rooms['join'].items():
if room_id not in self.rooms:
self._mkroom(room_id)
room = self.rooms[room_id]
# TODO: the rest of this for loop should be in room object method
room.prev_batch = sync_room["timeline"]["prev_batch"]
if "state" in sync_room and "events" in sync_room["state"]:
for event in sync_room["state"]["events"]:
event['room_id'] = room_id
room._process_state_event(event)
if "timeline" in sync_room and "events" in sync_room["timeline"]:
for event in sync_room["timeline"]["events"]:
event['room_id'] = room_id
room._put_event(event)
# TODO: global listeners can still exist but work by each
# room.listeners[uuid] having reference to global listener
# Dispatch for client (global) listeners
for listener in self.listeners:
if (
listener['event_type'] is None or
listener['event_type'] == event['type']
):
listener['callback'](event)
if "ephemeral" in sync_room and "events" in sync_room["ephemeral"]:
for event in sync_room['ephemeral']['events']:
event['room_id'] = room_id
room._put_ephemeral_event(event)
for listener in self.ephemeral_listeners:
if (
listener['event_type'] is None or
listener['event_type'] == event['type']
):
listener['callback'](event)
def get_user(self, user_id):
"""Deprecated. Return a User by their id.
        This method only instantiates a User; instantiate the class directly instead.
You can also use :attr:`users` in order to access a User object which
was created automatically.
Args:
user_id (str): The matrix user id of a user.
"""
warn("get_user is deprecated. Directly instantiate a User instead.",
DeprecationWarning)
return User(self.api, user_id)
# TODO: move to Room class
def remove_room_alias(self, room_alias):
"""Remove mapping of an alias
Args:
room_alias(str): The alias to be removed.
Returns:
bool: True if the alias is removed, False otherwise.
"""
try:
self.api.remove_room_alias(room_alias)
return True
except MatrixRequestError:
return False
| |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy
import copy
from pyscf import lib, gto, scf, dft
from pyscf.tdscf import rhf, rks
from pyscf import tdscf
from pyscf.data import nist
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = [
['H' , (0. , 0. , .917)],
['F' , (0. , 0. , 0.)], ]
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol).run()
td_hf = tdscf.TDHF(mf).run()
mf_lda3 = dft.RKS(mol)
mf_lda3.xc = 'lda, vwn_rpa'
mf_lda3.grids.prune = None
mf_lda3.scf()
mf_lda = dft.RKS(mol)
mf_lda.xc = 'lda, vwn'
mf_lda.grids.prune = None
mf_lda.scf()
mf_bp86 = dft.RKS(mol)
mf_bp86.xc = 'b88,p86'
mf_bp86.grids.prune = None
mf_bp86.scf()
mf_b3lyp = dft.RKS(mol)
mf_b3lyp.xc = 'b3lyp'
mf_b3lyp.grids.prune = None
mf_b3lyp.scf()
mf_b3lyp1 = dft.RKS(mol)
mf_b3lyp1.xc = 'b3lyp'
mf_b3lyp1.grids.prune = None
mf_b3lyp1._numint.libxc = dft.xcfun
mf_b3lyp1.scf()
#mf_b3pw91g = dft.RKS(mol)
#mf_b3pw91g.xc = 'b3pw91g'
#mf_b3pw91g.grids.prune = None
#mf_b3pw91g.scf()
def tearDownModule():
global mol, mf, td_hf, mf_lda3, mf_lda, mf_bp86, mf_b3lyp, mf_b3lyp1#, mf_b3pw91g
mol.stdout.close()
del mol, mf, td_hf, mf_lda3, mf_lda, mf_bp86, mf_b3lyp, mf_b3lyp1#, mf_b3pw91g
class KnownValues(unittest.TestCase):
def test_nohbrid_lda(self):
td = rks.TDDFTNoHybrid(mf_lda3)
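        # 27.2114 converts excitation energies from Hartree to eV (approximately
        # nist.HARTREE2EV); the same factor recurs throughout these tests.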
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.059050077236151, 6)
ref = [9.74227238, 9.74227238, 14.85153818, 30.35019348, 30.35019348]
self.assertAlmostEqual(abs(es - ref).max(), 0, 6)
def test_nohbrid_b88p86(self):
td = rks.TDDFTNoHybrid(mf_bp86)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -40.462005239920558, 6)
def test_tddft_lda(self):
td = rks.TDDFT(mf_lda3)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.059050077236151, 6)
def test_tddft_b88p86(self):
td = rks.TDDFT(mf_bp86)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -40.462005239920558, 6)
#def test_tddft_b3pw91g(self):
# td = rks.TDDFT(mf_b3pw91g)
# es = td.kernel(nstates=5)[0] * 27.2114
# self.assertAlmostEqual(lib.fp(es), -41.218912874291014, 6)
def test_tddft_b3lyp(self):
td = rks.TDDFT(mf_b3lyp)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.29609453661341, 6)
def test_tda_b3lypg(self):
mf = dft.RKS(mol)
mf.xc = 'b3lypg'
mf.grids.prune = None
mf.scf()
td = rks.TDA(mf)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.385520327568869, 6)
#def test_tda_b3pw91g(self):
# td = rks.TDA(mf_b3pw91g)
# es = td.kernel(nstates=5)[0] * 27.2114
# self.assertAlmostEqual(lib.fp(es), -41.313632163628363, 6)
def test_tda_lda(self):
td = rks.TDA(mf_lda)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.201828219760415, 6)
def test_tddft_b3lyp_xcfun(self):
with lib.temporary_env(dft.numint.NumInt, libxc=dft.xcfun):
td = rks.TDDFT(mf_b3lyp1)
es = td.kernel(nstates=5)[0] * 27.2114
ref = [9.88975514, 9.88975514, 15.16643994, 30.55289462, 30.55289462]
self.assertAlmostEqual(abs(es - ref).max(), 0, 6)
def test_tda_b3lyp_xcfun(self):
with lib.temporary_env(dft.numint.NumInt, libxc=dft.xcfun):
td = rks.TDA(mf_b3lyp1)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.393122257109056, 6)
def test_tda_lda_xcfun(self):
mf = dft.RKS(mol)
mf.xc = 'lda,vwn'
mf.grids.prune = None
mf._numint.libxc = dft.xcfun
mf.scf()
td = rks.TDA(mf)
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -41.201828219760415, 6)
ref = [9.68872769, 9.68872769, 15.07122478]
self.assertAlmostEqual(abs(es[:3] - ref).max(), 0, 6)
def test_tda_b3lyp_triplet(self):
td = rks.TDA(mf_b3lyp)
td.singlet = False
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -40.020204585289648, 6)
td.analyze()
def test_tda_lda_triplet(self):
td = rks.TDA(mf_lda)
td.singlet = False
es = td.kernel(nstates=5)[0] * 27.2114
self.assertAlmostEqual(lib.fp(es), -39.988118769202416, 6)
ref = [9.0139312, 9.0139312, 12.42444659]
self.assertAlmostEqual(abs(es[:3] - ref).max(), 0, 6)
def test_tddft_b88p86_triplet(self):
td = rks.TDDFT(mf_bp86)
td.singlet = False
es = td.kernel(nstates=5)[0] * 27.2114
ref = [9.09322358, 9.09322358, 12.29843139, 29.26731075, 29.26731075]
self.assertAlmostEqual(abs(es - ref).max(), 0, 5)
def test_ab_hf(self):
mf = scf.RHF(mol).run()
a, b = rhf.get_ab(mf)
fock = mf.get_hcore() + mf.get_veff()
ftda = rhf.gen_tda_operation(mf, fock, singlet=True)[0]
ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
nocc = numpy.count_nonzero(mf.mo_occ == 2)
nvir = numpy.count_nonzero(mf.mo_occ == 0)
numpy.random.seed(2)
x, y = xy = numpy.random.random((2,nocc,nvir))
ax = numpy.einsum('iajb,jb->ia', a, x)
self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc,nvir)).max(), 0, 6)
ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
ab2 =-numpy.einsum('iajb,jb->ia', b, x)
ab2-= numpy.einsum('iajb,jb->ia', a, y)
abxy_ref = ftdhf([xy]).reshape(2,nocc,nvir)
self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)
def test_ab_lda(self):
mf = mf_lda
a, b = rhf.get_ab(mf)
ftda = rhf.gen_tda_operation(mf, singlet=True)[0]
ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
nocc = numpy.count_nonzero(mf.mo_occ == 2)
nvir = numpy.count_nonzero(mf.mo_occ == 0)
numpy.random.seed(2)
x, y = xy = numpy.random.random((2,nocc,nvir))
ax = numpy.einsum('iajb,jb->ia', a, x)
self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc,nvir)).max(), 0, 9)
ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
ab2 =-numpy.einsum('iajb,jb->ia', b, x)
ab2-= numpy.einsum('iajb,jb->ia', a, y)
abxy_ref = ftdhf([xy]).reshape(2,nocc,nvir)
self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)
def test_ab_b3lyp(self):
mf = mf_b3lyp
a, b = rks.TDDFT(mf).get_ab()
ftda = rhf.gen_tda_operation(mf, singlet=None)[0]
ftdhf = rhf.gen_tdhf_operation(mf, singlet=True)[0]
nocc = numpy.count_nonzero(mf.mo_occ == 2)
nvir = numpy.count_nonzero(mf.mo_occ == 0)
numpy.random.seed(2)
x, y = xy = numpy.random.random((2,nocc,nvir))
ax = numpy.einsum('iajb,jb->ia', a, x)
self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc,nvir)).max(), 0, 9)
ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
ab2 =-numpy.einsum('iajb,jb->ia', b, x)
ab2-= numpy.einsum('iajb,jb->ia', a, y)
abxy_ref = ftdhf([xy]).reshape(2,nocc,nvir)
self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 9)
self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 9)
def test_nto(self):
mf = scf.RHF(mol).run()
td = rks.TDA(mf).run()
w, nto = td.get_nto(state=3)
self.assertAlmostEqual(w[0], 0.98655300613468389, 9)
self.assertAlmostEqual(lib.fp(w), 0.98625701534112464, 9)
w, nto = td.get_nto(state=0)
self.assertAlmostEqual(w[0], 0.99997335352278072, 9)
self.assertAlmostEqual(lib.fp(w), 0.99998775067586554, 9)
pmol = copy.copy(mol)
pmol.symmetry = True
pmol.build(0, 0)
mf = scf.RHF(pmol).run()
td = rks.TDA(mf).run(nstates=3)
w, nto = td.get_nto(state=-1)
self.assertAlmostEqual(w[0], 0.98655300613468389, 9)
self.assertAlmostEqual(lib.fp(w), 0.98625701534112464, 9)
def test_analyze(self):
f = td_hf.oscillator_strength(gauge='length')
self.assertAlmostEqual(lib.fp(f), -0.13908774016795605, 7)
f = td_hf.oscillator_strength(gauge='velocity', order=2)
self.assertAlmostEqual(lib.fp(f), -0.096991134490587522, 6)
note_args = []
def temp_logger_note(rec, msg, *args):
note_args.append(args)
with lib.temporary_env(lib.logger.Logger, note=temp_logger_note):
td_hf.analyze()
ref = [(),
(1, 11.834865910142547, 104.76181013351982, 0.01075359074556743),
(2, 11.834865910142618, 104.76181013351919, 0.010753590745567499),
(3, 16.66308427853695, 74.40651170629978, 0.3740302871966713)]
self.assertAlmostEqual(abs(numpy.hstack(ref) -
numpy.hstack(note_args)).max(), 0, 6)
self.assertEqual(td_hf.nroots, td_hf.nstates)
self.assertAlmostEqual(lib.fp(td_hf.e_tot-mf.e_tot), 0.41508325757603637, 6)
def test_init(self):
hf = scf.RHF(mol)
ks = scf.RKS(mol)
kshf = scf.RKS(mol).set(xc='HF')
self.assertTrue(isinstance(tdscf.TDA(hf), tdscf.rhf.TDA))
self.assertTrue(isinstance(tdscf.TDA(ks), tdscf.rks.TDA))
self.assertTrue(isinstance(tdscf.TDA(kshf), tdscf.rks.TDA))
self.assertTrue(isinstance(tdscf.RPA(hf), tdscf.rhf.TDHF))
self.assertTrue(isinstance(tdscf.RPA(ks), tdscf.rks.TDDFTNoHybrid))
self.assertTrue(isinstance(tdscf.RPA(kshf), tdscf.rks.TDDFT))
self.assertTrue(isinstance(tdscf.TDDFT(hf), tdscf.rhf.TDHF))
self.assertTrue(isinstance(tdscf.TDDFT(ks), tdscf.rks.TDDFTNoHybrid))
self.assertTrue(isinstance(tdscf.TDDFT(kshf), tdscf.rks.TDDFT))
self.assertRaises(RuntimeError, tdscf.dRPA, hf)
self.assertTrue(isinstance(tdscf.dRPA(kshf), tdscf.rks.dRPA))
self.assertTrue(isinstance(tdscf.dRPA(ks), tdscf.rks.dRPA))
self.assertRaises(RuntimeError, tdscf.dTDA, hf)
self.assertTrue(isinstance(tdscf.dTDA(kshf), tdscf.rks.dTDA))
self.assertTrue(isinstance(tdscf.dTDA(ks), tdscf.rks.dTDA))
kshf.xc = ''
self.assertTrue(isinstance(tdscf.dTDA(kshf), tdscf.rks.dTDA))
self.assertTrue(isinstance(tdscf.dRPA(kshf), tdscf.rks.dRPA))
def test_tda_with_wfnsym(self):
pmol = mol.copy()
pmol.symmetry = True
pmol.build(0, 0)
mf = dft.RKS(pmol).run()
td = rks.TDA(mf)
td.wfnsym = 'A2'
es = td.kernel(nstates=3)[0]
self.assertTrue(len(es) == 2) # At most 2 states due to symmetry subspace size
self.assertAlmostEqual(lib.fp(es), 2.1857694738741071, 6)
note_args = []
def temp_logger_note(rec, msg, *args):
note_args.append(args)
with lib.temporary_env(lib.logger.Logger, note=temp_logger_note):
td.analyze()
ref = [(),
(1, 'A2', 38.42106241429979, 32.26985141807447, 0.0),
(2, 'A2', 38.972172173478356, 31.813519911465608, 0.0)]
self.assertEqual(note_args[1][1], 'A2')
self.assertEqual(note_args[2][1], 'A2')
self.assertAlmostEqual(abs(numpy.append(ref[1][2:], ref[2][2:]) -
numpy.append(note_args[1][2:], note_args[2][2:])).max(),
0, 7)
def test_tdhf_with_wfnsym(self):
pmol = mol.copy()
pmol.symmetry = True
pmol.build()
mf = scf.RHF(pmol).run()
td = rhf.TDHF(mf)
td.wfnsym = 'A2'
td.nroots = 3
es = td.kernel()[0]
self.assertAlmostEqual(lib.fp(es), 2.2541287466157165, 6)
td.analyze()
def test_tddft_with_wfnsym(self):
pmol = mol.copy()
pmol.symmetry = True
pmol.build()
mf = dft.RKS(pmol).run()
td = rks.TDDFTNoHybrid(mf)
td.wfnsym = 'A2'
td.nroots = 3
es = td.kernel()[0]
self.assertTrue(len(es) == 2) # At most 2 states due to symmetry subspace size
self.assertAlmostEqual(lib.fp(es), 2.1856920990871753, 6)
td.analyze()
def test_scanner(self):
td_scan = td_hf.as_scanner().as_scanner()
td_scan.nroots = 3
td_scan(mol)
self.assertAlmostEqual(lib.fp(td_scan.e), 0.41508325757603637, 6)
def test_transition_multipoles(self):
self.assertAlmostEqual(abs(lib.fp(td_hf.transition_dipole() [2])), 0.39833021312014988, 5)
self.assertAlmostEqual(abs(lib.fp(td_hf.transition_quadrupole() [2])), 0.14862776196563565, 5)
self.assertAlmostEqual(abs(lib.fp(td_hf.transition_octupole() [2])), 2.79058994496489410, 5)
self.assertAlmostEqual(abs(lib.fp(td_hf.transition_velocity_dipole() [2])), 0.24021409469918567, 5)
self.assertAlmostEqual(abs(lib.fp(td_hf.transition_magnetic_dipole() [2])), 0 , 5)
self.assertAlmostEqual(abs(lib.fp(td_hf.transition_magnetic_quadrupole()[2])), 0.16558596265719450, 5)
def test_dRPA(self):
td = rks.dRPA(mf_lda)
td._scf.xc = ''
es = td.kernel(nstates=5)[0]
self.assertAlmostEqual(lib.fp(es[:3]), 0.32727702719009616, 6)
ref = [10.00343861, 10.00343861, 15.62586305, 30.69238874, 30.69238874]
self.assertAlmostEqual(abs(es * 27.2114 - ref).max(), 0, 6)
def test_dTDA(self):
td = rks.dTDA(mf_lda)
td._scf.xc = ''
es = td.kernel(nstates=3)[0]
self.assertAlmostEqual(lib.fp(es), 0.3237948650800024, 6)
td = rks.dTDA(mf_lda)
es = td.kernel(nstates=5)[0]
self.assertAlmostEqual(lib.fp(es[:3]), 0.3237948650800024, 6)
ref = [10.05245288, 10.05245288, 16.03497655, 30.7120363, 30.7120363 ]
self.assertAlmostEqual(abs(es * 27.2114 - ref).max(), 0, 6)
def test_reset(self):
mol1 = gto.M(atom='C')
td = scf.RHF(mol).newton().TDHF()
td.reset(mol1)
self.assertTrue(td.mol is mol1)
self.assertTrue(td._scf.mol is mol1)
self.assertTrue(td._scf._scf.mol is mol1)
def test_custom_rsh(self):
mol = gto.M(atom='H 0 0 0.6; H 0 0 0', basis = "6-31g")
mf = dft.RKS(mol)
mf._numint.libxc = dft.xcfun
mf.xc = "camb3lyp"
mf.omega = 0.2
e = mf.kernel()
self.assertAlmostEqual(e, -1.143272159913611, 8)
e_td = mf.TDDFT().kernel()[0]
ref = [16.14837289, 28.01968627, 49.00854076]
self.assertAlmostEqual(abs(e_td*nist.HARTREE2EV - ref).max(), 0, 4)
def test_symmetry_init_guess(self):
mol = gto.M(atom='N 0 0 0; N 0 0 1.2', basis='631g', symmetry=True)
td = mol.RHF.run().TDA().run(nstates=1)
self.assertAlmostEqual(td.e[0], 0.22349707455528, 7)
# TODO: verify symmetry of td.x == A1u
if __name__ == "__main__":
print("Full Tests for TD-RKS")
unittest.main()
| |
import six
from abc import ABCMeta
from pyticketswitch.exceptions import InvalidParametersError
@six.add_metaclass(ABCMeta)
class PaymentMethod(object):
"""Abstract base class for payment methods"""
def as_api_parameters(self):
"""Generate keyword arguments suitable for consumption by the
ticketswitch API
Returns:
dict: dictionary of keyword parameters to pass to the API call.
"""
raise NotImplementedError(
'as_api_parameters not implemented on ' + self.__class__)
class CardDetails(object):
"""Credit card details
This should never be returned by the API and is only used for supplying
card details to the purchase call.
Implements
:class:`PaymentMethod <pyticketswitch.payment_methods.PaymentMethod>`.
Attributes:
card_number (str): the long credit card number.
expiry_month (int): the month the card expires in. Defaults to
:obj:`None`.
        expiry_year (int): the year the card expires in. Defaults to
            :obj:`None`.
        start_month (int): the month the card is valid from. Defaults to
            :obj:`None`.
        start_year (int): the year the card is valid from. Defaults to
            :obj:`None`.
ccv2 (str): credit card security code. Defaults to :obj:`None`.
issue_number (str): issue number of the card. Defaults to :obj:`None`.
billing_address (:class:`Address <pyticketswitch.address.Address>`):
used when the customer wishes to use an alternate billing address.
when not specified the customer address will be used.
return_url (str): some card debitors may decide that they need to
redirect the user to a third party for verification (for example
3d secure). These parties need a location to return a customer to.
            When available, it's recommended to provide it. In the situation
where a return url is required but not provided, then the payment
will fail.
return_token (str): a unique token that can be used by you to identify
when a card debitor returns to you.
user_agent (str): the customer's browser's User-Agent header.
            only necessary when providing a return url.
accept (str): the customer's browser's Accept header.
        remote_site (str): the remote site's domain; must match the domain of
the return_url.
"""
def __init__(self, card_number, expiry_month=None,
expiry_year=None, start_month=None, start_year=None,
ccv2=None, issue_number=None, billing_address=None,
return_url=None, return_token=None, user_agent=None,
accept=None, remote_site=None):
self.card_number = card_number
self.expiry_month = expiry_month
self.expiry_year = expiry_year
self.start_month = start_month
self.start_year = start_year
self.ccv2 = ccv2
self.issue_number = issue_number
self.billing_address = billing_address
self.return_url = return_url
self.return_token = return_token
self.user_agent = user_agent
self.accept = accept
self.remote_site = remote_site
def as_api_parameters(self):
"""Generates a dictionary of parameters to be passed back to the API.
Returns:
dict: a set of parameters describing the card details to the API.
"""
params = {
'card_number': self.card_number,
}
missing_expiry_year = not self.expiry_year
missing_expiry_month = not self.expiry_month
if missing_expiry_year or missing_expiry_month:
raise InvalidParametersError(
'both expiry_year and expiry_month must be specified')
params.update(
expiry_date='{:0>2}{:0>2}'.format(
self.expiry_month,
# handle 4 digit years
str(self.expiry_year)[-2:]
)
)
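        # For example (hypothetical values): expiry_month=3 and expiry_year=2021
        # produce expiry_date='0321'.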
missing_start_year = not self.start_year
missing_start_month = not self.start_month
specifying_start_date = self.start_year or self.start_month
if specifying_start_date and (missing_start_year or missing_start_month):
raise InvalidParametersError(
'both start_year and start_month must be specified or neither specified')
if specifying_start_date:
params.update(
start_date='{:0>2}{:0>2}'.format(
self.start_month,
str(self.start_year)[-2:]
)
)
if self.ccv2:
params.update(cv_two=self.ccv2)
if self.issue_number:
params.update(issue_number=self.issue_number)
if self.billing_address:
params.update(
**self.billing_address.as_api_billing_address_parameters()
)
if self.return_url:
params.update(return_url=self.return_url)
if self.return_token:
params.update(return_token=self.return_token)
if self.user_agent:
params.update(client_http_user_agent=self.user_agent)
if self.accept:
params.update(client_http_accept=self.accept)
if self.remote_site:
params.update(remote_site=self.remote_site)
return params
class RedirectionDetails(object):
"""Information that specifies where a customer will be returned to after
being redirected to an external payment provider.
Implements
:class:`PaymentMethod <pyticketswitch.payment_methods.PaymentMethod>`.
Attributes:
token (str): a unique token that can be used by you to identify when
this callout returns to your website.
url (str): the URL that the payment provider should redirect back to on
success/failure of the customers payment.
user_agent (str): the customer's browser's User-Agent header.
accept (str): the customer's browser's Accept header.
        remote_site (str): the remote site's domain; must match the domain of the
return_url.
"""
def __init__(self, token, url, user_agent, accept, remote_site):
self.token = token
self.url = url
self.user_agent = user_agent
self.accept = accept
self.remote_site = remote_site
def as_api_parameters(self):
"""Generate API keyword args for these details.
Returns:
dict: the redirection details in a format the API will understand.
"""
return {
'return_token': self.token,
'return_url': self.url,
'client_http_user_agent': self.user_agent,
'client_http_accept': self.accept,
'remote_site': self.remote_site,
}
class StripeDetails(object):
"""For use with self generated stripe tokens
Can be used to provide stripe tokens directly to the API at purchase time
avoiding a callout/callback cycle.
Implements
:class:`PaymentMethod <pyticketswitch.payment_methods.PaymentMethod>`.
Attributes:
tokens (dict): dictionary of stripe card tokens indexed on bundle source
code. If there are multiple bundles in the trolley, then a unique
stripe token must be provided for each of the bundles you wish to
purchase with stripe.
"""
def __init__(self, tokens):
self.tokens = tokens
def as_api_parameters(self):
"""Generate API keyword args for these details.
Returns:
dict: the stripe details in a format the API will understand.
"""
return {
'{}_callback/stripeToken'.format(source): token
for source, token in self.tokens.items()
}
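        # For example (hypothetical values), StripeDetails(tokens={'ext0': 'tok_abc'})
        # yields {'ext0_callback/stripeToken': 'tok_abc'}.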
class CiderDetails(object):
"""For use with multiple payment tokens and details
Can be used to provide payment tokens and details directly to the API at
purchase time, avoiding a callout/callback cycle.
Implements
:class:`PaymentMethod <pyticketswitch.payment_methods.PaymentMethod>`.
Attributes:
data (dict): dictionary of payment tokens and details
system_codes (list): a list of systems for which payment is being made
"""
def __init__(self, data, system_codes):
self.data = data
self.system_codes = system_codes
def as_api_parameters(self):
"""Generate API keyword args for these details.
Returns:
dict: the cider debitor details in the format the API can use.
"""
data = {}
for system in self.system_codes:
data.update({
"{0}_callback/{1}".format(system, variable): self.data[variable]
for variable in self.data.keys()
})
return data
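        # For example (hypothetical values), CiderDetails({'token': 'abc'}, ['ext0'])
        # yields {'ext0_callback/token': 'abc'}.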
PaymentMethod.register(CardDetails)
PaymentMethod.register(RedirectionDetails)
PaymentMethod.register(StripeDetails)
PaymentMethod.register(CiderDetails)
| |
from unittest.mock import call
from unittest.mock import Mock
from sqlalchemy import exc
from sqlalchemy.orm import collections
from sqlalchemy.orm import relationship
from sqlalchemy.orm import validates
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import ne_
from sqlalchemy.testing.fixtures import fixture_session
from test.orm import _fixtures
class ValidatorTest(_fixtures.FixtureTest):
def test_scalar(self):
users = self.tables.users
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("name")
def validate_name(self, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(name="ed")
eq_(u1.name, "ed modified")
assert_raises(AssertionError, setattr, u1, "name", "fred")
eq_(u1.name, "ed modified")
eq_(canary.mock_calls, [call("name", "ed"), call("name", "fred")])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="ed modified").one(),
User(name="ed"),
)
def test_collection(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("addresses")
def validate_address(self, key, ad):
canary(key, ad)
assert "@" in ad.email_address
return ad
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="edward")
a0 = Address(email_address="noemail")
assert_raises(AssertionError, u1.addresses.append, a0)
a1 = Address(id=15, email_address="foo@bar.com")
u1.addresses.append(a1)
eq_(canary.mock_calls, [call("addresses", a0), call("addresses", a1)])
sess.add(u1)
sess.commit()
eq_(
sess.query(User).filter_by(name="edward").one(),
User(
name="edward", addresses=[Address(email_address="foo@bar.com")]
),
)
def test_validators_dict(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("name")
def validate_name(self, key, name):
ne_(name, "fred")
return name + " modified"
@validates("addresses")
def validate_address(self, key, ad):
assert "@" in ad.email_address
return ad
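            # simple_function below is intentionally undecorated; it must not appear
            # in u_m.validators, which the eq_() assertion checks.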
def simple_function(self, key, value):
return key, value
u_m = self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
eq_(
dict((k, v[0].__name__) for k, v in list(u_m.validators.items())),
{"name": "validate_name", "addresses": "validate_address"},
)
def test_validator_w_removes(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
canary = Mock()
class User(fixtures.ComparableEntity):
@validates("name", include_removes=True)
def validate_name(self, key, item, remove):
canary(key, item, remove)
return item
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.name = "ed"
u1.name = "mary"
del u1.name
a1, a2, a3 = Address(), Address(), Address()
u1.addresses.append(a1)
u1.addresses.remove(a1)
u1.addresses = [a1, a2]
u1.addresses = [a2, a3]
eq_(
canary.mock_calls,
[
call("name", "ed", False),
call("name", "mary", False),
call("name", "mary", True),
# append a1
call("addresses", a1, False),
# remove a1
call("addresses", a1, True),
# set to [a1, a2] - this is two appends
call("addresses", a1, False),
call("addresses", a2, False),
# set to [a2, a3] - this is a remove of a1,
# append of a3. the appends are first.
# in 1.2 due to #3896, we also get 'a2' in the
# validates as it is part of the set
call("addresses", a2, False),
call("addresses", a3, False),
call("addresses", a1, True),
],
)
def test_validator_bulk_collection_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses.append("e1")
u1.addresses.append("e2")
eq_(
u1.addresses,
[Address(email_address="e1"), Address(email_address="e2")],
)
u1.addresses = ["e3", "e4"]
eq_(
u1.addresses,
[Address(email_address="e3"), Address(email_address="e4")],
)
def test_validator_bulk_dict_set(self):
users, addresses, Address = (
self.tables.users,
self.tables.addresses,
self.classes.Address,
)
class User(fixtures.ComparableEntity):
@validates("addresses", include_removes=True)
def validate_address(self, key, item, remove):
if not remove:
assert isinstance(item, str)
else:
assert isinstance(item, Address)
item = Address(email_address=item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=collections.attribute_mapped_collection(
"email_address"
),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses["e1"] = "e1"
u1.addresses["e2"] = "e2"
eq_(
u1.addresses,
{
"e1": Address(email_address="e1"),
"e2": Address(email_address="e2"),
},
)
u1.addresses = {"e3": "e3", "e4": "e4"}
eq_(
u1.addresses,
{
"e3": Address(email_address="e3"),
"e4": Address(email_address="e4"),
},
)
def test_validator_as_callable_object(self):
"""test #6538"""
users = self.tables.users
canary = Mock()
class SomeValidator:
def __call__(self, obj, key, name):
canary(key, name)
ne_(name, "fred")
return name + " modified"
class User(fixtures.ComparableEntity):
sv = validates("name")(SomeValidator())
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="ed")
eq_(u1.name, "ed modified")
def test_validator_multi_warning(self):
users = self.tables.users
class Foo:
@validates("name")
def validate_one(self, key, value):
pass
@validates("name")
def validate_two(self, key, value):
pass
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Foo|users already exists",
self.mapper_registry.map_imperatively,
Foo,
users,
)
class Bar:
@validates("id")
def validate_three(self, key, value):
return value + 10
@validates("id", "name")
def validate_four(self, key, value):
return value + "foo"
assert_raises_message(
exc.InvalidRequestError,
"A validation function for mapped attribute "
"'name' on mapper Mapper|Bar|users already exists",
self.mapper_registry.map_imperatively,
Bar,
users,
)
def test_validator_wo_backrefs_wo_removes(self):
self._test_validator_backrefs(False, False)
def test_validator_wo_backrefs_w_removes(self):
self._test_validator_backrefs(False, True)
def test_validator_w_backrefs_wo_removes(self):
self._test_validator_backrefs(True, False)
def test_validator_w_backrefs_w_removes(self):
self._test_validator_backrefs(True, True)
def _test_validator_backrefs(self, include_backrefs, include_removes):
users, addresses = (self.tables.users, self.tables.addresses)
canary = Mock()
class User(fixtures.ComparableEntity):
if include_removes:
@validates(
"addresses",
include_removes=True,
include_backrefs=include_backrefs,
)
def validate_address(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates(
"addresses",
include_removes=False,
include_backrefs=include_backrefs,
)
def validate_address(self, key, item):
canary(key, item)
return item
class Address(fixtures.ComparableEntity):
if include_removes:
@validates(
"user",
include_backrefs=include_backrefs,
include_removes=True,
)
def validate_user(self, key, item, remove):
canary(key, item, remove)
return item
else:
@validates("user", include_backrefs=include_backrefs)
def validate_user(self, key, item):
canary(key, item)
return item
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u2 = User()
a1, a2 = Address(), Address()
# 3 append/set, two removes
u1.addresses.append(a1)
u1.addresses.append(a2)
a2.user = u2
del a1.user
u2.addresses.remove(a2)
        # copy the call list first so that the eq_() comparisons below don't
        # themselves get recorded as additional mock calls
calls = list(canary.mock_calls)
if include_backrefs:
if include_removes:
eq_(
calls,
[
# append #1
call("addresses", Address(), False),
# backref for append
call("user", User(addresses=[]), False),
# append #2
call("addresses", Address(user=None), False),
# backref for append
call("user", User(addresses=[]), False),
# assign a2.user = u2
call("user", User(addresses=[]), False),
# backref for u1.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for u2.addresses.append(a2)
call("addresses", Address(user=None), False),
# del a1.user
call("user", User(addresses=[]), True),
# backref for u1.addresses.remove(a1)
call("addresses", Address(), True),
# u2.addresses.remove(a2)
call("addresses", Address(user=None), True),
# backref for a2.user = None
call("user", None, False),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
call("user", User(addresses=[])),
call("addresses", Address(user=None)),
call("user", None),
],
)
else:
if include_removes:
eq_(
calls,
[
call("addresses", Address(), False),
call("addresses", Address(user=None), False),
call("user", User(addresses=[]), False),
call("user", User(addresses=[]), True),
call("addresses", Address(user=None), True),
],
)
else:
eq_(
calls,
[
call("addresses", Address()),
call("addresses", Address(user=None)),
call("user", User(addresses=[])),
],
)
| |
"""
Randomized tests for invariant properties of some clustering metrics
"""
import numpy as np
import warnings
from lsh_hdc.metrics import ClusteringMetrics, ConfusionMatrix2, \
hmean, _div, adjusted_rand_score, mutual_info_score, \
adjusted_mutual_info_score, gmean
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_almost_equal, assert_true, assert_equal
from sklearn.metrics import \
homogeneity_completeness_v_measure as sklearn_hcv, \
adjusted_rand_score as sklearn_ari, \
mutual_info_score as sklearn_mi, \
adjusted_mutual_info_score as sklearn_ami
from lsh_hdc.hungarian import linear_sum_assignment
def assignment_score_slow(cm, normalize=True, rpad=False, cpad=False):
"""Calls Python/Numpy implementation of the Hungarian method
Testing version (uses SciPy's implementation)
"""
cost_matrix = -cm.to_array(rpad=rpad, cpad=cpad)
ris, cis = linear_sum_assignment(cost_matrix)
score = -cost_matrix[ris, cis].sum()
if normalize:
score = _div(score, cm.grand_total)
return score
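# A worked example (hypothetical matrix): for a diagonal contingency table
# [[3, 0], [0, 7]] the optimal assignment keeps the diagonal, so the raw score
# is 10 and the normalized score is 10 / 10 = 1.0.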
def check_with_nans(num1, num2, places=None, msg=None, delta=None, ensure_nans=True):
nancheck_msg = "NaN check failed for '%s'" % msg
if np.isnan(num1):
if ensure_nans:
assert_true(np.isnan(num2), msg=nancheck_msg)
elif not np.isnan(num2):
warnings.warn(nancheck_msg)
elif np.isnan(num2):
if ensure_nans:
assert_true(np.isnan(num1), msg=nancheck_msg)
elif not np.isnan(num1):
warnings.warn(nancheck_msg)
else:
assert_almost_equal(num1, num2, places=places, msg=msg, delta=delta)
def test_m1():
"""M1 model
"""
t2 = ClusteringMetrics(rows=10 * np.ones((2, 2), dtype=int))
t8 = ClusteringMetrics(rows=10 * np.ones((8, 8), dtype=int))
assert_almost_equal(0.0, t2.vi_similarity_m1())
assert_almost_equal(0.0, t8.vi_similarity_m1())
assert_almost_equal(0.0, t2.split_join_similarity_m1())
assert_almost_equal(0.0, t8.split_join_similarity_m1())
assert_almost_equal(0.0, t2.assignment_score_m1())
assert_almost_equal(0.0, t8.assignment_score_m1())
def test_RxC_general():
"""General conteingency-table mathods
"""
for _ in xrange(100):
size = np.random.randint(4, 100)
a = np.random.randint(low=0, high=np.random.randint(low=2, high=100),
size=(size,))
b = np.random.randint(low=0, high=np.random.randint(low=2, high=100),
size=(size,))
cm = ClusteringMetrics.from_labels(a, b)
assert_almost_equal(
cm.assignment_score(model=None),
assignment_score_slow(cm, rpad=False, cpad=False))
assert_almost_equal(
cm.assignment_score(model=None),
assignment_score_slow(cm, rpad=True, cpad=True))
for model in ['m1', 'm2r', 'm2c', 'm3']:
assert_almost_equal(
cm.grand_total,
sum(cm.expected(model=model).itervalues()))
assert_almost_equal(
cm.assignment_score(model=model),
cm.adjust_to_null(cm.assignment_score, model=model)[0])
assert_almost_equal(
cm.split_join_similarity(model=model),
cm.adjust_to_null(cm.split_join_similarity, model=model)[0])
def test_RxC_metrics():
"""Alternative implementations should coincide for RxC matrices
"""
for _ in xrange(100):
ltrue = np.random.randint(low=0, high=5, size=(20,))
lpred = np.random.randint(low=0, high=5, size=(20,))
cm = ClusteringMetrics.from_labels(ltrue, lpred)
# homogeneity, completeness, V-measure
expected_v = cm.vi_similarity_m3()
expected_hcv = sklearn_hcv(ltrue, lpred)
actual_hcv = cm.entropy_scores()
assert_array_almost_equal(actual_hcv, expected_hcv)
assert_array_almost_equal(actual_hcv[2], expected_v)
# mutual information score
expected_mi = sklearn_mi(ltrue, lpred)
actual_mi = mutual_info_score(ltrue, lpred)
assert_array_almost_equal(actual_mi, expected_mi)
# adjusted mutual information
expected_ami = sklearn_ami(ltrue, lpred)
actual_ami = adjusted_mutual_info_score(ltrue, lpred)
assert_array_almost_equal(actual_ami, expected_ami)
# adjusted rand index
expected_ari = sklearn_ari(ltrue, lpred)
actual_ari = adjusted_rand_score(ltrue, lpred)
assert_array_almost_equal(actual_ari, expected_ari)
def test_2x2_invariants():
"""Alternative implementations should coincide for 2x2 matrices
"""
for _ in xrange(100):
cm = ConfusionMatrix2.from_random_counts(low=0, high=10)
# object idempotency
assert_equal(
cm.to_ccw(),
ConfusionMatrix2.from_ccw(*cm.to_ccw()).to_ccw(),
msg="must be able to convert to tuple and create from tuple")
# pairwise H, C, V
h, c, v = cm.pairwise_hcv()[:3]
check_with_nans(v, gmean(h, c), ensure_nans=False)
# informedness
actual_info = cm.informedness()
expected_info_1 = cm.TPR() + cm.TNR() - 1.0
expected_info_2 = cm.TPR() - cm.FPR()
check_with_nans(actual_info, expected_info_1, 4, ensure_nans=False)
check_with_nans(actual_info, expected_info_2, 4, ensure_nans=False)
# markedness
actual_mark = cm.markedness()
expected_mark_1 = cm.PPV() + cm.NPV() - 1.0
expected_mark_2 = cm.PPV() - cm.FOR()
check_with_nans(actual_mark, expected_mark_1, 4, ensure_nans=False)
check_with_nans(actual_mark, expected_mark_2, 4, ensure_nans=False)
# matthews corr coeff
# actual_mcc = cm.matthews_corr()
# expected_mcc = gmean(actual_info, actual_mark)
# check_with_nans(actual_mcc, expected_mcc, 4, ensure_nans=False)
# kappas
actual_kappa = cm.kappa()
# kappa is the same as harmonic mean of kappa components
expected_kappa_1 = hmean(*cm.kappas()[:2])
check_with_nans(actual_kappa, expected_kappa_1, 4, ensure_nans=False)
# kappa is the same as accuracy adjusted for chance
expected_kappa_2 = hmean(*cm.adjust_to_null(cm.accuracy, model='m3'))
check_with_nans(actual_kappa, expected_kappa_2, 4, ensure_nans=False)
# kappa is the same as Dice coeff adjusted for chance
expected_kappa_3 = hmean(*cm.adjust_to_null(cm.dice_coeff, model='m3'))
check_with_nans(actual_kappa, expected_kappa_3, 4, ensure_nans=False)
# odds ratio and Yule's Q
actual_odds_ratio = cm.DOR()
actual_yule_q = cm.yule_q()
expected_yule_q = _div(actual_odds_ratio - 1.0, actual_odds_ratio + 1.0)
expected_odds_ratio = _div(cm.PLL(), cm.NLL())
check_with_nans(actual_odds_ratio, expected_odds_ratio, 4, ensure_nans=False)
check_with_nans(actual_yule_q, expected_yule_q, 4, ensure_nans=False)
# F-score and Dice
expected_f = hmean(cm.precision(), cm.recall())
actual_f = cm.fscore()
check_with_nans(expected_f, actual_f, 6)
check_with_nans(expected_f, cm.dice_coeff(), 6, ensure_nans=False)
# association coefficients (1)
dice = cm.dice_coeff()
expected_jaccard = _div(dice, 2.0 - dice)
actual_jaccard = cm.jaccard_coeff()
check_with_nans(actual_jaccard, expected_jaccard, 6, ensure_nans=False)
# association coefficients (2)
jaccard = cm.jaccard_coeff()
expected_ss2 = _div(jaccard, 2.0 - jaccard)
actual_ss2 = cm.sokal_sneath_coeff()
check_with_nans(actual_ss2, expected_ss2, 6, ensure_nans=False)
# adjusted ochiai
actual = cm.ochiai_coeff_adj()
expected = hmean(*cm.adjust_to_null(cm.ochiai_coeff, model='m3'))
check_with_nans(actual, expected, 6, ensure_nans=False)
| |
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2015/9/1 ~ 2019/11/6
# @Author : Allen Woo
# coding: utf-8
import random
from PIL import Image
from PIL import ImageFilter
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
from apps.configs.sys_config import FONT_PATH
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
try:
from wheezy.captcha import image as wheezy_captcha
except ImportError:
wheezy_captcha = None
DEFAULT_FONTS = [FONT_PATH]
if wheezy_captcha:
__all__ = ['ImageCaptcha', 'WheezyCaptcha']
else:
__all__ = ['ImageCaptcha']
table = []
for i in range(256):
table.append(i * 1.97)
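# Used below as an Image.point() lookup table when building the paste mask for
# each rendered character (it brightens the glyph's greyscale mask).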
class _Captcha:
def generate(self, chars, format='png'):
"""Generate an Image Captcha of the given characters.
:param chars: text to be generated.
:param format: image file format
"""
im = self.generate_image(chars)
out = BytesIO()
im.save(out, format=format)
out.seek(0)
return out
def write(self, chars, output, format='png'):
"""Generate and write an image CAPTCHA data to the output.
:param chars: text to be generated.
:param output: output destination.
:param format: image file format
"""
im = self.generate_image(chars)
return im.save(output, format=format)
class WheezyCaptcha(_Captcha):
"""Create an image CAPTCHA with wheezy.captcha."""
def __init__(self, width=200, height=75, fonts=None):
self._width = width
self._height = height
self._fonts = fonts or DEFAULT_FONTS
def generate_image(self, chars):
text_drawings = [
wheezy_captcha.warp(),
wheezy_captcha.rotate(),
wheezy_captcha.offset(),
]
fn = wheezy_captcha.captcha(
drawings=[
wheezy_captcha.background(),
wheezy_captcha.text(fonts=self._fonts, drawings=text_drawings),
wheezy_captcha.curve(),
wheezy_captcha.noise(),
wheezy_captcha.smooth(),
],
width=self._width,
height=self._height,
)
return fn(chars)
class ImageCaptcha(_Captcha):
"""Create an image CAPTCHA.
    Much of the code is borrowed from wheezy.captcha, modified to be more
    memory- and developer-friendly.
ImageCaptcha has one built-in font, DroidSansMono, which is licensed under
Apache License 2. You should always use your own fonts::
captcha = ImageCaptcha(fonts=['/path/to/A.ttf', '/path/to/B.ttf'])
    You can supply as many fonts as you like, but be aware that all of them are
    loaded into memory, so avoid using too many.
:param width: The width of the CAPTCHA image.
:param height: The height of the CAPTCHA image.
:param fonts: Fonts to be used to generate CAPTCHA images.
    :param font_sizes: Font sizes to choose from at random.
"""
def __init__(self, width=160, height=60, fonts=None, font_sizes=None):
self._width = width
self._height = height
self._fonts = fonts or DEFAULT_FONTS
self._font_sizes = font_sizes or (42, 50, 56)
self._truefonts = []
@property
def truefonts(self):
if self._truefonts:
return self._truefonts
self._truefonts = tuple([
truetype(n, s)
for n in self._fonts
for s in self._font_sizes
])
return self._truefonts
@staticmethod
def create_noise_curve(image, color):
w, h = image.size
x1 = random.randint(0, int(w / 5))
x2 = random.randint(w - int(w / 5), w)
y1 = random.randint(int(h / 5), h - int(h / 5))
y2 = random.randint(y1, h - int(h / 5))
points = [x1, y1, x2, y2]
end = random.randint(160, 200)
start = random.randint(0, 20)
Draw(image).arc(points, start, end, fill=color)
return image
@staticmethod
def create_noise_dots(image, color, width=3, number=30):
draw = Draw(image)
w, h = image.size
while number:
x1 = random.randint(0, w)
y1 = random.randint(0, h)
draw.line(((x1, y1), (x1 - 1, y1 - 1)), fill=color, width=width)
number -= 1
return image
def create_captcha_image(self, chars, color, background):
"""Create the CAPTCHA image itself.
:param chars: text to be generated.
:param color: color of the text.
:param background: color of the background.
The color should be a tuple of 3 numbers, such as (0, 255, 255).
"""
image = Image.new('RGB', (self._width, self._height), background)
draw = Draw(image)
def _draw_character(c):
font = random.choice(self.truefonts)
w, h = draw.textsize(c, font=font)
dx = random.randint(0, 4)
dy = random.randint(0, 6)
im = Image.new('RGBA', (w + dx, h + dy))
Draw(im).text((dx, dy), c, font=font, fill=color)
# rotate
im = im.crop(im.getbbox())
im = im.rotate(random.uniform(-30, 30), Image.BILINEAR, expand=1)
# warp
dx = w * random.uniform(0.1, 0.3)
dy = h * random.uniform(0.2, 0.3)
x1 = int(random.uniform(-dx, dx))
y1 = int(random.uniform(-dy, dy))
x2 = int(random.uniform(-dx, dx))
y2 = int(random.uniform(-dy, dy))
w2 = w + abs(x1) + abs(x2)
h2 = h + abs(y1) + abs(y2)
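            # The QUAD transform maps the quadrilateral with these corners
            # (upper-left, lower-left, lower-right, upper-right) in the enlarged
            # (w2, h2) image onto the (w, h) output box, producing a random
            # perspective-style warp of the glyph.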
data = (
x1, y1,
-x1, h2 - y2,
w2 + x2, h2 + y2,
w2 - x2, -y1,
)
im = im.resize((w2, h2))
im = im.transform((w, h), Image.QUAD, data)
return im
images = []
for c in chars:
if random.random() > 0.5:
images.append(_draw_character(" "))
images.append(_draw_character(c))
text_width = sum([im.size[0] for im in images])
width = max(text_width, self._width)
image = image.resize((width, self._height))
average = int(text_width / len(chars))
rand = int(0.25 * average)
offset = int(average * 0.1)
for im in images:
w, h = im.size
mask = im.convert('L').point(table)
image.paste(im, (offset, int((self._height - h) / 2)), mask)
offset = offset + w + random.randint(-rand, 0)
if width > self._width:
image = image.resize((self._width, self._height))
return image
def generate_image(self, chars):
"""Generate the image of the given characters.
:param chars: text to be generated.
"""
background = random_color(238, 255)
color = random_color(10, 200, random.randint(220, 255))
im = self.create_captcha_image(chars, color, background)
self.create_noise_dots(im, color)
self.create_noise_curve(im, color)
im = im.filter(ImageFilter.SMOOTH)
return im
def random_color(start, end, opacity=None):
red = random.randint(start, end)
green = random.randint(start, end)
blue = random.randint(start, end)
if opacity is None:
return (red, green, blue)
return (red, green, blue, opacity)
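# A minimal usage sketch (not part of the original module). It assumes
# FONT_PATH resolves to a readable TrueType font; the output filename
# 'captcha_demo.png' is an arbitrary placeholder.
if __name__ == '__main__':
    demo = ImageCaptcha(width=160, height=60)
    demo.write('3abc', 'captcha_demo.png')  # render directly to a file
    buf = demo.generate('3abc')             # or get an in-memory PNG (BytesIO)
    print('demo captcha size: %d bytes' % len(buf.getvalue()))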
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.inotify_file_watcher."""
import logging
import os
import os.path
import shutil
import sys
import tempfile
import unittest
from google.appengine.tools.devappserver2 import inotify_file_watcher
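# Sketch of the watcher API exercised by these tests (comments only, inferred
# from the calls below): construct with a list of directories, start() begins
# watching, _get_changed_paths() returns the set of paths changed since the
# previous call, and quit() stops the watcher.
#   watcher = inotify_file_watcher.InotifyFileWatcher(['/path/to/app'])
#   watcher.start()
#   changed = watcher._get_changed_paths()
#   watcher.quit()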
@unittest.skipUnless(sys.platform.startswith('linux'), 'requires linux')
class TestInotifyFileWatcher(unittest.TestCase):
"""Tests for inotify_file_watcher.InotifyFileWatcher."""
def setUp(self):
self._directory = tempfile.mkdtemp() # The watched directory
self._junk_directory = tempfile.mkdtemp() # A scrap directory.
self._watcher = inotify_file_watcher.InotifyFileWatcher([self._directory])
logging.debug('watched directory=%r, junk directory=%r',
self._directory, self._junk_directory)
def tearDown(self):
self._watcher.quit()
shutil.rmtree(self._directory)
shutil.rmtree(self._junk_directory)
def _create_file(self, relative_path):
realpath = os.path.realpath(os.path.join(self._directory, relative_path))
with open(realpath, 'w'):
pass
return realpath
def _create_directory(self, relative_path):
realpath = os.path.realpath(os.path.join(self._directory, relative_path))
os.mkdir(realpath)
return realpath
def _create_directory_tree(self, path, num_directories):
"""Create exactly num_directories subdirectories in path."""
assert num_directories >= 0
if not num_directories:
return
self._create_directory(path)
num_directories -= 1
# Divide the remaining number of directories to create among 4
    # subdirectories in an approximately even fashion.
for i in range(4, 0, -1):
sub_dir_size = num_directories/i
self._create_directory_tree(os.path.join(path, 'dir%d' % i), sub_dir_size)
num_directories -= sub_dir_size
def test_file_created(self):
self._watcher.start()
path = self._create_file('test')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def test_file_modified(self):
path = self._create_file('test')
self._watcher.start()
with open(path, 'w') as f:
f.write('testing')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def test_file_read(self):
path = self._create_file('test')
with open(path, 'w') as f:
f.write('testing')
self._watcher.start()
with open(path, 'r') as f:
f.read()
# Reads should not trigger updates.
self.assertEqual(
set(),
self._watcher._get_changed_paths())
def test_file_deleted(self):
path = self._create_file('test')
self._watcher.start()
os.remove(path)
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def test_file_renamed(self):
source = self._create_file('test')
target = os.path.join(os.path.dirname(source), 'test2')
self._watcher.start()
os.rename(source, target)
self.assertEqual(
set([source, target]),
self._watcher._get_changed_paths())
def test_create_directory(self):
self._watcher.start()
directory = self._create_directory('test')
self.assertEqual(
set([directory]),
self._watcher._get_changed_paths())
def test_file_created_in_directory(self):
directory = self._create_directory('test')
self._watcher.start()
path = self._create_file('test/file')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def test_move_directory(self):
source = self._create_directory('test')
target = os.path.join(os.path.dirname(source), 'test2')
self._watcher.start()
os.rename(source, target)
self.assertEqual(
set([source, target]),
self._watcher._get_changed_paths())
def test_move_directory_out_of_watched(self):
source = self._create_directory('test')
target = os.path.join(self._junk_directory, 'test')
self._watcher.start()
os.rename(source, target)
self.assertEqual(
set([source]),
self._watcher._get_changed_paths())
with open(os.path.join(target, 'file'), 'w'):
pass
# Changes to files in subdirectories that have been moved should be ignored.
self.assertEqual(
set([]),
self._watcher._get_changed_paths())
def test_move_directory_into_watched(self):
source = os.path.join(self._junk_directory, 'source')
target = os.path.join(self._directory, 'target')
os.mkdir(source)
self._watcher.start()
os.rename(source, target)
self.assertEqual(
set([target]),
self._watcher._get_changed_paths())
file_path = os.path.join(target, 'file')
with open(file_path, 'w+'):
pass
self.assertEqual(
set([file_path]),
self._watcher._get_changed_paths())
def test_directory_deleted(self):
path = self._create_directory('test')
self._watcher.start()
os.rmdir(path)
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def test_subdirectory_deleted(self):
"""Tests that internal _directory_to_subdirs is updated on delete."""
path = self._create_directory('test')
sub_path = self._create_directory('test/test2')
self._watcher.start()
self.assertEqual(
set([sub_path]),
self._watcher._directory_to_subdirs[path])
os.rmdir(sub_path)
self.assertEqual(
set([sub_path]),
self._watcher._get_changed_paths())
self.assertEqual(
set(),
self._watcher._directory_to_subdirs[path])
os.rmdir(path)
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def test_symlink_directory(self):
sym_target = os.path.join(self._directory, 'test')
os.mkdir(os.path.join(self._junk_directory, 'subdir'))
self._watcher.start()
# Check that an added symlinked directory is reported.
os.symlink(self._junk_directory, sym_target)
self.assertEqual(
set([sym_target]),
self._watcher._get_changed_paths())
# Check that a file added to the symlinked directory is reported.
with open(os.path.join(self._junk_directory, 'file1'), 'w'):
pass
self.assertEqual(
set([os.path.join(self._directory, 'test', 'file1')]),
self._watcher._get_changed_paths())
# Check that modifying the file in the symlinked directory is reported.
with open(os.path.join(self._junk_directory, 'file1'), 'w') as fp:
fp.write('some data')
self.assertEqual(
set([os.path.join(self._directory, 'test', 'file1')]),
self._watcher._get_changed_paths())
# Check that a removed symlinked directory is reported.
os.remove(sym_target)
self.assertEqual(
set([sym_target]),
self._watcher._get_changed_paths())
# Check that a file added to the removed symlinked directory is *not*
# reported.
with open(os.path.join(self._junk_directory, 'subdir', 'file2'), 'w'):
pass
self.assertEqual(
set(),
self._watcher._get_changed_paths())
@unittest.skip('b/11896748')
def test_symlink_file(self):
actual_file = os.path.join(self._junk_directory, 'moo')
with open(actual_file, 'w'):
pass
symbolic_link = os.path.join(self._directory, 'moo')
self._watcher.start()
# Check that symlinking a file into watched directory is reported.
os.symlink(actual_file, symbolic_link)
self.assertEqual(
set([symbolic_link]),
self._watcher._get_changed_paths())
# Check that modifying the source file is reported.
with open(actual_file, 'w') as fp:
fp.write('some data')
self.assertEqual(
set([symbolic_link]),
self._watcher._get_changed_paths())
# Check that deleting the source file is reported.
os.unlink(actual_file)
self.assertEqual(
set([symbolic_link]),
self._watcher._get_changed_paths())
def test_many_directories(self):
# Linux supports a limited number of watches per file descriptor. The
# default is 8192 (i.e. 2^13).
self._create_directory_tree('bigdir', num_directories=10000)
self._watcher.start()
path = self._create_file('bigdir/dir4/dir4/file')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
@unittest.skipUnless(sys.platform.startswith('linux'), 'requires linux')
class TestInotifyFileWatcherMultipleDirectories(unittest.TestCase):
"""Tests for inotify_file_watcher.InotifyFileWatcher."""
def setUp(self):
self._directories = [tempfile.mkdtemp() for _ in range(4)]
self._watcher = inotify_file_watcher.InotifyFileWatcher(self._directories)
self._watcher.start()
def tearDown(self):
self._watcher.quit()
for directory in self._directories:
shutil.rmtree(directory)
@staticmethod
def _create_file(*paths):
realpath = os.path.realpath(os.path.join(*paths))
with open(realpath, 'w'):
pass
return realpath
def testInDir0(self):
path = self._create_file(self._directories[0], 'moo')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def testInDir2(self):
path = self._create_file(self._directories[2], 'moo')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def testInDir1And3(self):
path1 = self._create_file(self._directories[1], 'moo')
path3 = self._create_file(self._directories[3], 'moo')
self.assertEqual(
set([path1, path3]),
self._watcher._get_changed_paths())
if __name__ == '__main__':
unittest.main()
| |
import sys
import platform
import llvmlite.binding as ll
import llvmlite.llvmpy.core as lc
from llvmlite import ir
from numba import _dynfunc
from numba.core.callwrapper import PyCallWrapper
from numba.core.base import BaseContext, PYOBJECT
from numba.core import utils, types, config, cgutils, callconv, codegen, externals, fastmathpass, intrinsics
from numba.core.utils import cached_property
from numba.core.options import TargetOptions, include_default_options
from numba.core.runtime import rtsys
from numba.core.compiler_lock import global_compiler_lock
import numba.core.entrypoints
from numba.core.cpu_options import (ParallelOptions, FastMathOptions,
InlineOptions)
from numba.np import ufunc_db
# Keep those structures in sync with _dynfunc.c.
class ClosureBody(cgutils.Structure):
_fields = [('env', types.pyobject)]
class EnvBody(cgutils.Structure):
_fields = [
('globals', types.pyobject),
('consts', types.pyobject),
]
class CPUContext(BaseContext):
"""
Changes BaseContext calling convention
"""
allow_dynamic_globals = True
def __init__(self, typingctx, target='cpu'):
super().__init__(typingctx, target)
# Overrides
def create_module(self, name):
return self._internal_codegen._create_empty_module(name)
@global_compiler_lock
def init(self):
self.is32bit = (utils.MACHINE_BITS == 32)
self._internal_codegen = codegen.JITCPUCodegen("numba.exec")
# Add ARM ABI functions from libgcc_s
if platform.machine() == 'armv7l':
ll.load_library_permanently('libgcc_s.so.1')
# Map external C functions.
externals.c_math_functions.install(self)
# Initialize NRT runtime
rtsys.initialize(self)
# Add lower_extension attribute
self.lower_extensions = {}
from numba.parfors.parfor_lowering import _lower_parfor_parallel
from numba.parfors.parfor import Parfor
# Specify how to lower Parfor nodes using the lower_extensions
self.lower_extensions[Parfor] = _lower_parfor_parallel
def load_additional_registries(self):
# Add implementations that work via import
from numba.cpython import (builtins, charseq, enumimpl, hashing, heapq,
iterators, listobj, numbers, rangeobj,
setobj, slicing, tupleobj, unicode,)
from numba.core import optional
from numba.misc import gdb_hook, literal
from numba.np import linalg, polynomial, arraymath, arrayobj
from numba.typed import typeddict, dictimpl
from numba.typed import typedlist, listobject
from numba.experimental import jitclass, function_type
from numba.np import npdatetime
# Add target specific implementations
from numba.np import npyimpl
from numba.cpython import cmathimpl, mathimpl, printimpl, randomimpl
from numba.misc import cffiimpl
from numba.experimental.jitclass.base import ClassBuilder as \
jitclassimpl
self.install_registry(cmathimpl.registry)
self.install_registry(cffiimpl.registry)
self.install_registry(mathimpl.registry)
self.install_registry(npyimpl.registry)
self.install_registry(printimpl.registry)
self.install_registry(randomimpl.registry)
self.install_registry(jitclassimpl.class_impl_registry)
# load 3rd party extensions
numba.core.entrypoints.init_all()
@property
def target_data(self):
return self._internal_codegen.target_data
def with_aot_codegen(self, name, **aot_options):
aot_codegen = codegen.AOTCPUCodegen(name, **aot_options)
return self.subtarget(_internal_codegen=aot_codegen,
aot_mode=True)
def codegen(self):
return self._internal_codegen
@cached_property
def call_conv(self):
return callconv.CPUCallConv(self)
def get_env_body(self, builder, envptr):
"""
From the given *envptr* (a pointer to a _dynfunc.Environment object),
get a EnvBody allowing structured access to environment fields.
"""
body_ptr = cgutils.pointer_add(
builder, envptr, _dynfunc._impl_info['offsetof_env_body'])
return EnvBody(self, builder, ref=body_ptr, cast_ref=True)
def get_env_manager(self, builder):
envgv = self.declare_env_global(builder.module,
self.get_env_name(self.fndesc))
envarg = builder.load(envgv)
pyapi = self.get_python_api(builder)
pyapi.emit_environment_sentry(
envarg, debug_msg=self.fndesc.env_name,
)
env_body = self.get_env_body(builder, envarg)
return pyapi.get_env_manager(self.environment, env_body, envarg)
def get_generator_state(self, builder, genptr, return_type):
"""
From the given *genptr* (a pointer to a _dynfunc.Generator object),
get a pointer to its state area.
"""
return cgutils.pointer_add(
builder, genptr, _dynfunc._impl_info['offsetof_generator_state'],
return_type=return_type)
def build_list(self, builder, list_type, items):
"""
Build a list from the Numba *list_type* and its initial *items*.
"""
from numba.cpython import listobj
return listobj.build_list(self, builder, list_type, items)
def build_set(self, builder, set_type, items):
"""
Build a set from the Numba *set_type* and its initial *items*.
"""
from numba.cpython import setobj
return setobj.build_set(self, builder, set_type, items)
def build_map(self, builder, dict_type, item_types, items):
from numba.typed import dictobject
return dictobject.build_map(self, builder, dict_type, item_types, items)
def post_lowering(self, mod, library):
if self.fastmath:
fastmathpass.rewrite_module(mod, self.fastmath)
if self.is32bit:
# 32-bit machine needs to replace all 64-bit div/rem to avoid
# calls to compiler-rt
intrinsics.fix_divmod(mod)
library.add_linking_library(rtsys.library)
def create_cpython_wrapper(self, library, fndesc, env, call_helper,
release_gil=False):
wrapper_module = self.create_module("wrapper")
fnty = self.call_conv.get_function_type(fndesc.restype, fndesc.argtypes)
wrapper_callee = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name)
builder = PyCallWrapper(self, wrapper_module, wrapper_callee,
fndesc, env, call_helper=call_helper,
release_gil=release_gil)
builder.build()
library.add_ir_module(wrapper_module)
def create_cfunc_wrapper(self, library, fndesc, env, call_helper):
wrapper_module = self.create_module("cfunc_wrapper")
fnty = self.call_conv.get_function_type(fndesc.restype, fndesc.argtypes)
wrapper_callee = ir.Function(wrapper_module, fnty, fndesc.llvm_func_name)
ll_argtypes = [self.get_value_type(ty) for ty in fndesc.argtypes]
ll_return_type = self.get_value_type(fndesc.restype)
wrapty = ir.FunctionType(ll_return_type, ll_argtypes)
wrapfn = ir.Function(wrapper_module, wrapty, fndesc.llvm_cfunc_wrapper_name)
builder = ir.IRBuilder(wrapfn.append_basic_block('entry'))
status, out = self.call_conv.call_function(
builder, wrapper_callee, fndesc.restype, fndesc.argtypes,
wrapfn.args, attrs=('noinline',))
with builder.if_then(status.is_error, likely=False):
# If (and only if) an error occurred, acquire the GIL
# and use the interpreter to write out the exception.
pyapi = self.get_python_api(builder)
gil_state = pyapi.gil_ensure()
self.call_conv.raise_error(builder, pyapi, status)
cstr = self.insert_const_string(builder.module, repr(self))
strobj = pyapi.string_from_string(cstr)
pyapi.err_write_unraisable(strobj)
pyapi.decref(strobj)
pyapi.gil_release(gil_state)
builder.ret(out)
library.add_ir_module(wrapper_module)
def get_executable(self, library, fndesc, env):
"""
Returns
-------
        cfunc
            a callable wrapper function built from *fnptr* (the address of the
            compiled CPython wrapper) and *env* (an execution environment from
            _dynfunc), which are bound into it rather than returned separately
"""
# Code generation
baseptr = library.get_pointer_to_function(fndesc.llvm_func_name)
fnptr = library.get_pointer_to_function(fndesc.llvm_cpython_wrapper_name)
# Note: we avoid reusing the original docstring to avoid encoding
# issues on Python 2, see issue #1908
doc = "compiled wrapper for %r" % (fndesc.qualname,)
cfunc = _dynfunc.make_function(fndesc.lookup_module(),
fndesc.qualname.split('.')[-1],
doc, fnptr, env,
# objects to keepalive with the function
(library,)
)
library.codegen.set_env(self.get_env_name(fndesc), env)
return cfunc
def calc_array_sizeof(self, ndim):
'''
Calculate the size of an array struct on the CPU target
'''
aryty = types.Array(types.int32, ndim, 'A')
return self.get_abi_sizeof(self.get_value_type(aryty))
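        # (Assumption worth noting: the dtype does not affect the result here;
        # the size is determined by the array *struct* layout -- meminfo,
        # parent, nitems, itemsize, data pointer, plus shape/stride entries per
        # dimension -- so int32 is only a placeholder element type.)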
# Overrides
def get_ufunc_info(self, ufunc_key):
return ufunc_db.get_ufunc_info(ufunc_key)
# ----------------------------------------------------------------------------
# TargetOptions
_options_mixin = include_default_options(
"nopython",
"forceobj",
"looplift",
"_nrt",
"debug",
"boundscheck",
"nogil",
"no_rewrites",
"no_cpython_wrapper",
"no_cfunc_wrapper",
"parallel",
"fastmath",
"error_model",
"inline",
# Add "target_backend" as a accepted option for the CPU in @jit(...)
"target_backend",
)
class CPUTargetOptions(_options_mixin, TargetOptions):
def finalize(self, flags, options):
if not flags.is_set("enable_pyobject"):
flags.enable_pyobject = True
if not flags.is_set("enable_looplift"):
flags.enable_looplift = True
flags.inherit_if_not_set("nrt", default=True)
if not flags.is_set("debuginfo"):
flags.debuginfo = config.DEBUGINFO_DEFAULT
if not flags.is_set("boundscheck"):
flags.boundscheck = flags.debuginfo
flags.enable_pyobject_looplift = True
flags.inherit_if_not_set("fastmath")
flags.inherit_if_not_set("error_model", default="python")
# Add "target_backend" as a option that inherits from the caller
flags.inherit_if_not_set("target_backend")
# ----------------------------------------------------------------------------
# Internal
def remove_refct_calls(func):
"""
    Remove redundant incref/decref calls on a per-block basis
"""
for bb in func.basic_blocks:
remove_null_refct_call(bb)
remove_refct_pairs(bb)
def remove_null_refct_call(bb):
"""
Remove refct api calls to NULL pointer
"""
pass
## Skipped for now
# for inst in bb.instructions:
# if isinstance(inst, lc.CallOrInvokeInstruction):
# fname = inst.called_function.name
# if fname == "Py_IncRef" or fname == "Py_DecRef":
# arg = inst.args[0]
# print(type(arg))
# if isinstance(arg, lc.ConstantPointerNull):
# inst.erase_from_parent()
def remove_refct_pairs(bb):
"""
Remove incref decref pairs on the same variable
"""
didsomething = True
while didsomething:
didsomething = False
increfs = {}
decrefs = {}
# Mark
for inst in bb.instructions:
if isinstance(inst, lc.CallOrInvokeInstruction):
fname = inst.called_function.name
if fname == "Py_IncRef":
arg = inst.operands[0]
increfs[arg] = inst
elif fname == "Py_DecRef":
arg = inst.operands[0]
decrefs[arg] = inst
# Sweep
for val in increfs.keys():
if val in decrefs:
increfs[val].erase_from_parent()
decrefs[val].erase_from_parent()
didsomething = True
| |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Deprecated/Options/PathOption.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the PathOption canned option type, with tests for its
various canned validators.
"""
import os.path
import re
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
SConstruct_path = test.workpath('SConstruct')
def check(expect):
result = test.stdout().split('\n')
assert result[1:len(expect)+1] == expect, (result[1:len(expect)+1], expect)
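    # Comparison starts at index 1 because result[0] is SCons's own status
    # line (e.g. "scons: Reading SConscript files ..."), not output from the
    # SConstruct -- an assumption based on how these deprecated-option tests
    # are written.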
#### test PathOption ####
test.subdir('lib', 'qt', ['qt', 'lib'], 'nolib' )
workpath = test.workpath()
libpath = os.path.join(workpath, 'lib')
test.write(SConstruct_path, """\
from SCons.Options.PathOption import PathOption
PO = PathOption
from SCons.Options import PathOption
qtdir = r'%s'
opts = Options(args=ARGUMENTS)
opts.AddOptions(
PathOption('qtdir', 'where the root of Qt is installed', qtdir),
PO('qt_libraries', 'where the Qt library is installed', r'%s'),
)
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
print env['qtdir']
print env['qt_libraries']
print env.subst('$qt_libraries')
Default(env.Alias('dummy', None))
""" % (workpath, os.path.join('$qtdir', 'lib') ))
warnings = """
scons: warning: The Options class is deprecated; use the Variables class instead.
%s
scons: warning: The PathOption\\(\\) function is deprecated; use the PathVariable\\(\\) function instead.
%s""" % (TestSCons.file_expr, TestSCons.file_expr)
qtpath = workpath
libpath = os.path.join(qtpath, 'lib')
test.run(stderr=warnings)
check([qtpath, os.path.join('$qtdir', 'lib'), libpath])
qtpath = os.path.join(workpath, 'qt')
libpath = os.path.join(qtpath, 'lib')
test.run(arguments=['qtdir=%s' % qtpath], stderr=warnings)
check([qtpath, os.path.join('$qtdir', 'lib'), libpath])
qtpath = workpath
libpath = os.path.join(qtpath, 'nolib')
test.run(arguments=['qt_libraries=%s' % libpath], stderr=warnings)
check([qtpath, libpath, libpath])
qtpath = os.path.join(workpath, 'qt')
libpath = os.path.join(workpath, 'nolib')
test.run(arguments=['qtdir=%s' % qtpath, 'qt_libraries=%s' % libpath], stderr=warnings)
check([qtpath, libpath, libpath])
qtpath = os.path.join(workpath, 'non', 'existing', 'path')
qtpath_re = re.escape(qtpath)
expect_stderr = warnings + ("""
scons: \\*\\*\\* Path for option qtdir does not exist: %(qtpath_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['qtdir=%s' % qtpath], stderr=expect_stderr, status=2)
expect_stderr = warnings + ("""
scons: \\*\\*\\* Path for option qt_libraries does not exist: %(qtpath_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['qt_libraries=%s' % qtpath], stderr=expect_stderr, status=2)
default_file = test.workpath('default_file')
default_subdir = test.workpath('default_subdir')
existing_subdir = test.workpath('existing_subdir')
test.subdir(existing_subdir)
existing_file = test.workpath('existing_file')
test.write(existing_file, "existing_file\n")
non_existing_subdir = test.workpath('non_existing_subdir')
non_existing_file = test.workpath('non_existing_file')
default_file_re = re.escape(default_file)
default_subdir_re = re.escape(default_subdir)
existing_subdir_re = re.escape(existing_subdir)
existing_file_re = re.escape(existing_file)
non_existing_subdir_re = re.escape(non_existing_subdir)
non_existing_file_re = re.escape(non_existing_file)
test.write('SConstruct', """\
opts = Options(args=ARGUMENTS)
opts.AddOptions(
PathOption('X', 'X variable', r'%s', validator=PathOption.PathAccept),
)
env = Environment(options=opts)
print env['X']
Default(env.Alias('dummy', None))
""" % default_subdir)
test.run(stderr=warnings)
check([default_subdir])
test.run(arguments=['X=%s' % existing_file], stderr=warnings)
check([existing_file])
test.run(arguments=['X=%s' % non_existing_file], stderr=warnings)
check([non_existing_file])
test.run(arguments=['X=%s' % existing_subdir], stderr=warnings)
check([existing_subdir])
test.run(arguments=['X=%s' % non_existing_subdir], stderr=warnings)
check([non_existing_subdir])
test.must_not_exist(non_existing_file)
test.must_not_exist(non_existing_subdir)
test.write(SConstruct_path, """\
opts = Options(args=ARGUMENTS)
opts.AddOptions(
PathOption('X', 'X variable', r'%s', validator=PathOption.PathIsFile),
)
env = Environment(options=opts)
print env['X']
Default(env.Alias('dummy', None))
""" % default_file)
expect_stderr = warnings + ("""
scons: \\*\\*\\* File path for option X does not exist: %(default_file_re)s
""" % locals()) + TestSCons.file_expr
test.run(status=2, stderr=expect_stderr)
test.write(default_file, "default_file\n")
test.run(stderr=warnings)
check([default_file])
expect_stderr = warnings + ("""
scons: \\*\\*\\* File path for option X is a directory: %(existing_subdir_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['X=%s' % existing_subdir], status=2, stderr=expect_stderr)
test.run(arguments=['X=%s' % existing_file], stderr=warnings)
check([existing_file])
expect_stderr = warnings + ("""
scons: \\*\\*\\* File path for option X does not exist: %(non_existing_file_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['X=%s' % non_existing_file], status=2, stderr=expect_stderr)
test.write('SConstruct', """\
opts = Options(args=ARGUMENTS)
opts.AddOptions(
PathOption('X', 'X variable', r'%s', validator=PathOption.PathIsDir),
)
env = Environment(options=opts)
print env['X']
Default(env.Alias('dummy', None))
""" % default_subdir)
expect_stderr = warnings + ("""
scons: \\*\\*\\* Directory path for option X does not exist: %(default_subdir_re)s
""" % locals()) + TestSCons.file_expr
test.run(status=2, stderr=expect_stderr)
test.subdir(default_subdir)
test.run(stderr=warnings)
check([default_subdir])
expect_stderr = warnings + ("""
scons: \\*\\*\\* Directory path for option X is a file: %(existing_file_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['X=%s' % existing_file],
status=2,
stderr=expect_stderr)
test.run(arguments=['X=%s' % existing_subdir], stderr=warnings)
check([existing_subdir])
expect_stderr = warnings + ("""
scons: \\*\\*\\* Directory path for option X does not exist: %(non_existing_subdir_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['X=%s' % non_existing_subdir],
status=2,
stderr=expect_stderr)
test.write('SConstruct', """\
opts = Options(args=ARGUMENTS)
opts.AddOptions(
PathOption('X', 'X variable', r'%s', validator=PathOption.PathIsDirCreate),
)
env = Environment(options=opts)
print env['X']
Default(env.Alias('dummy', None))
""" % default_subdir)
test.run(stderr=warnings)
check([default_subdir])
expect_stderr = warnings + ("""
scons: \\*\\*\\* Path for option X is a file, not a directory: %(existing_file_re)s
""" % locals()) + TestSCons.file_expr
test.run(arguments=['X=%s' % existing_file], status=2, stderr=expect_stderr)
test.run(arguments=['X=%s' % existing_subdir], stderr=warnings)
check([existing_subdir])
test.run(arguments=['X=%s' % non_existing_subdir], stderr=warnings)
check([non_existing_subdir])
test.must_exist(non_existing_subdir)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| |
from __future__ import absolute_import, division, print_function
import itertools
from numbers import Number
import numpy as np
import scipy as sp
from odin.visual.plot_utils import tile_raster_images, to_axis
def plot_heatmap(data,
cmap="Blues",
ax=None,
xticklabels=None,
yticklabels=None,
xlabel=None,
ylabel=None,
cbar_title=None,
cbar=False,
fontsize=12,
gridline=0,
hide_spines=True,
annotation=None,
text_colors=dict(diag="black",
minrow=None,
mincol=None,
maxrow=None,
maxcol=None,
other="black"),
title=None):
r""" Showing heatmap matrix """
from matplotlib import pyplot as plt
ax = to_axis(ax, is_3D=False)
ax.grid(False)
fig = ax.get_figure()
# figsize = fig.get_size_inches()
# prepare labels
if xticklabels is None and yticklabels is not None:
xticklabels = ["X#%d" % i for i in range(data.shape[1])]
if yticklabels is None and xticklabels is not None:
yticklabels = ["Y#%d" % i for i in range(data.shape[0])]
# Plot the heatmap
im = ax.imshow(data,
interpolation='nearest',
cmap=cmap,
aspect='equal',
origin='upper')
# Create colorbar
if cbar:
cb = plt.colorbar(im, fraction=0.02, pad=0.02)
if cbar_title is not None:
cb.ax.set_ylabel(cbar_title, rotation=-90, va="bottom", fontsize=fontsize)
## major ticks
if xticklabels is not None and yticklabels is not None:
ax.set_xticks(np.arange(data.shape[1]))
ax.set_xticklabels(xticklabels, fontsize=fontsize)
ax.set_yticks(np.arange(data.shape[0]))
ax.set_yticklabels(list(yticklabels), fontsize=fontsize)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(),
rotation=-30,
ha="right",
rotation_mode="anchor")
else: # turn-off all ticks
ax.tick_params(top=False, bottom=False, labeltop=False, labelbottom=False)
## axis label
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=fontsize + 1)
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=fontsize + 1)
## Turn spines off
if hide_spines:
for edge, spine in ax.spines.items():
spine.set_visible(False)
## minor ticks and create white grid.
# (if no minor ticks, the image will be cut-off)
ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)
ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)
if gridline > 0:
ax.grid(which="minor", color="w", linestyle='-', linewidth=gridline)
ax.tick_params(which="minor", bottom=False, left=False)
# set the title
if title is not None:
ax.set_title(str(title), fontsize=fontsize + 2, weight='semibold')
# prepare the annotation
if annotation is not None and annotation is not False:
if annotation is True:
annotation = np.array([['%.2g' % x for x in row] for row in data])
assert annotation.shape == data.shape
kw = dict(horizontalalignment="center",
verticalalignment="center",
fontsize=fontsize)
# np.log(max(2, np.mean(data.shape) - np.mean(figsize)))
# Loop over the data and create a `Text` for each "pixel".
# Change the text's color depending on the data.
texts = []
minrow = text_colors.get('minrow', None)
maxrow = text_colors.get('maxrow', None)
mincol = text_colors.get('mincol', None)
maxcol = text_colors.get('maxcol', None)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
# basics text config
if i == j:
kw['weight'] = 'bold'
color = text_colors.get('diag', 'black')
else:
kw['weight'] = 'normal'
color = text_colors.get('other', 'black')
# min, max of row
if data[i, j] == min(data[i]) and minrow is not None:
color = minrow
elif data[i, j] == max(data[i]) and maxrow is not None:
color = maxrow
# min, max of column
if data[i, j] == min(data[:, j]) and mincol is not None:
color = mincol
elif data[i, j] == max(data[:, j]) and maxcol is not None:
color = maxcol
# show text
text = im.axes.text(j, i, annotation[i, j], color=color, **kw)
texts.append(text)
return ax
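# A minimal plot_heatmap usage sketch (assumed data, not from the original
# source):
#   import numpy as np
#   data = np.random.rand(4, 6)
#   ax = plot_heatmap(data, annotation=True, cbar=True, title="demo heatmap")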
def plot_confusion_matrix(cm=None,
labels=None,
cmap="Blues",
ax=None,
fontsize=10,
cbar=False,
title=None,
y_true=None,
y_pred=None,
**kwargs):
r"""
  cm : a square matrix of raw counts
kwargs : arguments for `odin.visual.plot_heatmap`
"""
# TODO: new style for confusion matrix (using small and big dot)
if cm is None:
assert y_true is not None and y_pred is not None, \
"Provide either cm explicitly or y_true and y_pred together"
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true=y_true, y_pred=y_pred)
assert cm.shape[0] == cm.shape[1], \
"Plot confusion matrix only applied for squared matrix"
if labels is None:
labels = ['#%d' % i for i in range(max(cm.shape))]
# calculate F1
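  # With true classes along the rows and predictions along the columns, the
  # diagonal holds the true positives, column sums give TP + FP and row sums
  # give TP + FN, so per-class precision, recall and F1 follow directly.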
N_row = np.sum(cm, axis=-1)
N_col = np.sum(cm, axis=0)
TP = np.diagonal(cm)
FP = N_col - TP
FN = N_row - TP
precision = TP / (TP + FP)
recall = TP / (TP + FN)
F1 = 2 / (1 / precision + 1 / recall)
F1[np.isnan(F1)] = 0.
F1_mean = np.mean(F1)
# column normalize
nb_classes = cm.shape[0]
cm = cm.astype('float32') / np.sum(cm, axis=1, keepdims=True)
# generate annotation
annotation = np.empty(shape=(nb_classes, nb_classes), dtype=object)
for i, j in itertools.product(range(nb_classes), range(nb_classes)):
if i == j: # diagonal
text = '%.2f\nF1:%.2f' % (cm[i, j], F1[i])
else:
text = '%.2f' % cm[i, j]
annotation[i, j] = text
# plotting
return plot_heatmap(\
data=cm,
xticklabels=labels,
yticklabels=labels,
xlabel="Prediction",
ylabel="True",
cmap=cmap,
ax=ax,
fontsize=fontsize,
cbar=cbar,
cbar_title="Accuracy",
annotation=annotation,
text_colors=dict(diag='magenta', other='black', minrow='red'),
title='%s(F1: %.3f)' % ('' if title is None else str(title), F1_mean),
**kwargs)
def plot_Cnorm(cnorm,
labels,
Ptrue=[0.1, 0.5],
ax=None,
title=None,
fontsize=12):
from matplotlib import pyplot as plt
cmap = plt.cm.Blues
cnorm = cnorm.astype('float32')
if not isinstance(Ptrue, (tuple, list, np.ndarray)):
Ptrue = (Ptrue,)
Ptrue = [float(i) for i in Ptrue]
if len(Ptrue) != cnorm.shape[0]:
raise ValueError(
"`Cnorm` was calculated for %d Ptrue values, but given only "
"%d values for `Ptrue`: %s" % (cnorm.shape[0], len(Ptrue), str(Ptrue)))
ax = to_axis(ax, is_3D=False)
ax.imshow(cnorm, interpolation='nearest', cmap=cmap)
# axis.get_figure().colorbar(im)
ax.set_xticks(np.arange(len(labels)))
ax.set_yticks(np.arange(len(Ptrue)))
ax.set_xticklabels(labels, rotation=-57, fontsize=fontsize)
ax.set_yticklabels([str(i) for i in Ptrue], fontsize=fontsize)
ax.set_ylabel('Ptrue', fontsize=fontsize)
ax.set_xlabel('Predicted label', fontsize=fontsize)
# center text for value of each grid
for i, j in itertools.product(range(len(Ptrue)), range(len(labels))):
color = 'red'
weight = 'normal'
fs = fontsize
text = '%.2f' % cnorm[i, j]
plt.text(j,
i,
text,
weight=weight,
color=color,
fontsize=fs,
verticalalignment="center",
horizontalalignment="center")
# Turns off grid on the left Axis.
ax.grid(False)
title = "Cnorm: %.6f" % np.mean(cnorm) if title is None else \
"%s (Cnorm: %.6f)" % (str(title), np.mean(cnorm))
ax.set_title(title, fontsize=fontsize + 2, weight='semibold')
# axis.tight_layout()
return ax
def plot_weights(x, ax=None, colormap="Greys", cbar=False, keep_aspect=True):
r'''
Parameters
----------
x : np.ndarray
2D array
ax : matplotlib.Axis
create by fig.add_subplot, or plt.subplots
colormap : str
colormap alias from plt.cm.Greys = 'Greys' ('spectral')
plt.cm.gist_heat
cbar : bool, 'all'
whether adding cbar to plot, if cbar='all', call this
methods after you add all subplots will create big cbar
for all your plots
path : str
if path is specified, save png image to given path
Notes
-----
  Make sure nrow and ncol in add_subplot are ints, or this error will show up:
- ValueError: The truth value of an array with more than one element is
ambiguous. Use a.any() or a.all()
Example
-------
>>> x = np.random.rand(2000, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(2, 2, 1)
>>> dnntoolkit.visual.plot_weights(x, ax)
>>> ax = fig.add_subplot(2, 2, 2)
>>> dnntoolkit.visual.plot_weights(x, ax)
>>> ax = fig.add_subplot(2, 2, 3)
>>> dnntoolkit.visual.plot_weights(x, ax)
>>> ax = fig.add_subplot(2, 2, 4)
>>> dnntoolkit.visual.plot_weights(x, ax, path='/Users/trungnt13/tmp/shit.png')
>>> plt.show()
'''
from matplotlib import pyplot as plt
if colormap is None:
colormap = plt.cm.Greys
if x.ndim > 2:
raise ValueError('No support for > 2D')
elif x.ndim == 1:
x = x[:, None]
ax = ax if ax is not None else plt.gca()
if keep_aspect:
ax.set_aspect('equal', 'box')
# ax.tick_params(axis='both', which='major', labelsize=6)
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
ax.set_title(str(x.shape), fontsize=6)
img = ax.pcolorfast(x, cmap=colormap, alpha=0.8)
plt.grid(True)
if cbar == 'all':
fig = ax.get_figure()
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
elif cbar:
plt.colorbar(img, ax=ax)
return ax
def plot_weights3D(x, colormap="Greys"):
'''
Example
-------
>>> # 3D shape
>>> x = np.random.rand(32, 28, 28)
>>> dnntoolkit.visual.plot_conv_weights(x)
'''
from matplotlib import pyplot as plt
if colormap is None:
colormap = plt.cm.Greys
shape = x.shape
if len(shape) == 3:
ncols = int(np.ceil(np.sqrt(shape[0])))
nrows = int(ncols)
else:
raise ValueError('This function only support 3D weights matrices')
fig = plt.figure()
count = 0
for i in range(nrows):
for j in range(ncols):
count += 1
# skip
if count > shape[0]:
continue
ax = fig.add_subplot(nrows, ncols, count)
# ax.set_aspect('equal', 'box')
ax.set_xticks([])
ax.set_yticks([])
if i == 0 and j == 0:
ax.set_xlabel('Width:%d' % x.shape[-1], fontsize=6)
ax.xaxis.set_label_position('top')
ax.set_ylabel('Height:%d' % x.shape[-2], fontsize=6)
ax.yaxis.set_label_position('left')
else:
ax.axis('off')
      # image data: pcolorfast draws rows bottom-up, so flip the array
      # vertically to display it in the expected orientation
img = ax.pcolorfast(x[count - 1][::-1, :], cmap=colormap, alpha=0.9)
# plt.grid(True)
plt.tight_layout()
# cbar
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
return fig
def plot_weights4D(x, colormap="Greys"):
'''
Example
-------
>>> # 3D shape
>>> x = np.random.rand(32, 28, 28)
>>> dnntoolkit.visual.plot_conv_weights(x)
'''
from matplotlib import pyplot as plt
if colormap is None:
colormap = plt.cm.Greys
shape = x.shape
if len(shape) != 4:
raise ValueError('This function only support 4D weights matrices')
fig = plt.figure()
imgs = []
for i in range(shape[0]):
imgs.append(tile_raster_images(x[i], tile_spacing=(3, 3)))
ncols = int(np.ceil(np.sqrt(shape[0])))
nrows = int(ncols)
count = 0
for i in range(nrows):
for j in range(ncols):
count += 1
# skip
if count > shape[0]:
continue
ax = fig.add_subplot(nrows, ncols, count)
ax.set_aspect('equal', 'box')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
      # image data: pcolorfast draws rows bottom-up, so flip the array
      # vertically to display it in the expected orientation
img = ax.pcolorfast(imgs[count - 1][::-1, :], cmap=colormap, alpha=0.9)
plt.tight_layout()
# colorbar
axes = fig.get_axes()
fig.colorbar(img, ax=axes)
return fig
def plot_distance_heatmap(X,
labels,
lognorm=True,
colormap='hot',
ax=None,
legend_enable=True,
legend_loc='upper center',
legend_ncol=3,
legend_colspace=0.2,
fontsize=10,
cbar=True,
title=None):
r"""
Arguments:
X : (n_samples, n_features). Coordination for scatter points
labels : (n_samples,). List of classes index or name
"""
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from odin import backend as K
# prepare data
X = K.length_norm(X, axis=-1, epsilon=np.finfo(X.dtype).eps)
ax = to_axis(ax)
n_samples, n_dim = X.shape
# processing labels
labels = np.array(labels).ravel()
assert labels.shape[0] == n_samples, "labels must be 1-D array."
is_continuous = isinstance(labels[0], Number) and int(labels[0]) != labels[0]
# float values label (normalize -1 to 1) or binary classification
if is_continuous:
min_val = np.min(labels)
max_val = np.max(labels)
labels = 2 * (labels - min_val) / (max_val - min_val) - 1
n_labels = 2
labels_name = {'-1': 0, '+1': 1}
else:
labels_name = {name: i for i, name in enumerate(np.unique(labels))}
n_labels = len(labels_name)
labels = np.array([labels_name[name] for name in labels])
# ====== sorting label and X ====== #
order_X = np.vstack(
[x for _, x in sorted(zip(labels, X), key=lambda pair: pair[0])])
order_label = np.vstack(
[y for y, x in sorted(zip(labels, X), key=lambda pair: pair[0])])
distance = sp.spatial.distance_matrix(order_X, order_X)
if bool(lognorm):
distance = np.log1p(distance)
min_non_zero = np.min(distance[np.nonzero(distance)])
distance = np.clip(distance, a_min=min_non_zero, a_max=np.max(distance))
# ====== convert data to image ====== #
cm = plt.get_cmap(colormap)
distance_img = cm(distance)
# diagonal black line (i.e. zero distance)
# for i in range(n_samples):
# distance_img[i, i] = (0, 0, 0, 1)
# labels colormap
width = max(int(0.032 * n_samples), 8)
if n_labels == 2:
cm = plt.get_cmap('bwr')
horz_bar = np.repeat(cm(order_label.T), repeats=width, axis=0)
vert_bar = np.repeat(cm(order_label), repeats=width, axis=1)
all_colors = np.array((cm(np.min(labels)), cm(np.max(labels))))
else: # use seaborn color palette here is better
cm = [i + (1.,) for i in sns.color_palette(n_colors=n_labels)]
c = np.stack([cm[i] for i in order_label.ravel()])
horz_bar = np.repeat(np.expand_dims(c, 0), repeats=width, axis=0)
vert_bar = np.repeat(np.expand_dims(c, 1), repeats=width, axis=1)
all_colors = cm
# image
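  # Compose the final picture: the distance matrix is offset by `width` pixels,
  # a horizontal label-color band fills the top strip, a vertical band fills
  # the left strip, and the top-left width x width corner stays empty (zeros).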
final_img = np.zeros(shape=(n_samples + width, n_samples + width,
distance_img.shape[2]),
dtype=distance_img.dtype)
final_img[width:, width:] = distance_img
final_img[:width, width:] = horz_bar
final_img[width:, :width] = vert_bar
assert np.sum(final_img[:width, :width]) == 0, \
"Something wrong with my spacial coordination when writing this code!"
# ====== plotting ====== #
ax.imshow(final_img)
ax.axis('off')
# ====== legend ====== #
if bool(legend_enable):
legend_elements = [
Line2D([0], [0],
marker='o',
color=color,
label=name,
linewidth=0,
linestyle=None,
lw=0,
markerfacecolor=color,
markersize=fontsize // 2)
for color, name in zip(all_colors, labels_name.keys())
]
ax.legend(handles=legend_elements,
markerscale=1.,
scatterpoints=1,
scatteryoffsets=[0.375, 0.5, 0.3125],
loc=legend_loc,
bbox_to_anchor=(0.5, -0.01),
ncol=int(legend_ncol),
columnspacing=float(legend_colspace),
labelspacing=0.,
fontsize=fontsize - 1,
handletextpad=0.1)
# ====== final configurations ====== #
if title is not None:
ax.set_title(str(title), fontsize=fontsize)
if cbar:
from odin.visual import plot_colorbar
plot_colorbar(colormap,
vmin=np.min(distance),
vmax=np.max(distance),
ax=ax,
orientation='vertical')
return ax
|