gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
########## API Actions ##########
# Each ACTION_* constant names an IaaS API action; the string value is the
# action identifier sent with the corresponding API request.
# (Copyright suggests the Yunify/QingCloud IaaS API — confirm against API docs.)

# Notification Center
ACTION_DESCRIBE_NOTIFICATION_CENTER_USER_POSTS = "DescribeNotificationCenterUserPosts"
# Zones (regions/availability zones)
ACTION_DESCRIBE_ZONES = "DescribeZones"
# Asynchronous jobs
ACTION_DESCRIBE_JOBS = "DescribeJobs"
# Machine images
ACTION_DESCRIBE_IMAGES = "DescribeImages"
ACTION_CAPTURE_INSTANCE = "CaptureInstance"
ACTION_DELETE_IMAGES = "DeleteImages"
ACTION_MODIFY_IMAGE_ATTRIBUTES = "ModifyImageAttributes"
# Instances (virtual machines)
ACTION_DESCRIBE_INSTANCES = "DescribeInstances"
ACTION_RUN_INSTANCES = "RunInstances"
ACTION_RUN_INSTANCES_BY_CONFIGURATION = "RunInstancesByConfiguration"
ACTION_TERMINATE_INSTANCES = "TerminateInstances"
ACTION_START_INSTANCES = "StartInstances"
ACTION_RESTART_INSTANCES = "RestartInstances"
ACTION_STOP_INSTANCES = "StopInstances"
ACTION_RESIZE_INSTANCES = "ResizeInstances"
ACTION_RESET_INSTANCES = "ResetInstances"
ACTION_MODIFY_INSTANCE_ATTRIBUTES = "ModifyInstanceAttributes"
# User data attachments for instances
ACTION_UPLOAD_USERDATA_ATTACHMENT = "UploadUserDataAttachment"
# Block-storage volumes
ACTION_DESCRIBE_VOLUMES = "DescribeVolumes"
ACTION_CREATE_VOLUMES = "CreateVolumes"
ACTION_DELETE_VOLUMES = "DeleteVolumes"
ACTION_ATTACH_VOLUMES = "AttachVolumes"
ACTION_DETACH_VOLUMES = "DetachVolumes"
ACTION_RESIZE_VOLUMES = "ResizeVolumes"
ACTION_MODIFY_VOLUME_ATTRIBUTES = "ModifyVolumeAttributes"
# SSH key pairs
ACTION_DESCRIBE_KEY_PAIRS = "DescribeKeyPairs"
ACTION_CREATE_KEY_PAIR = "CreateKeyPair"
ACTION_DELETE_KEY_PAIRS = "DeleteKeyPairs"
ACTION_ATTACH_KEY_PAIRS = "AttachKeyPairs"
ACTION_DETACH_KEY_PAIRS = "DetachKeyPairs"
ACTION_MODIFY_KEYPAIR_ATTRIBUTES = "ModifyKeyPairAttributes"
# Security groups, their rules, and reusable IP sets
ACTION_DESCRIBE_SECURITY_GROUPS = "DescribeSecurityGroups"
ACTION_CREATE_SECURITY_GROUP = "CreateSecurityGroup"
ACTION_MODIFY_SECURITY_GROUP_ATTRIBUTES = "ModifySecurityGroupAttributes"
ACTION_APPLY_SECURITY_GROUP = "ApplySecurityGroup"
ACTION_DELETE_SECURITY_GROUPS = "DeleteSecurityGroups"
ACTION_DESCRIBE_SECURITY_GROUP_RULES = "DescribeSecurityGroupRules"
ACTION_ADD_SECURITY_GROUP_RULES = "AddSecurityGroupRules"
ACTION_DELETE_SECURITY_GROUP_RULES = "DeleteSecurityGroupRules"
ACTION_MODIFY_SECURITY_GROUP_RULE_ATTRIBUTES = "ModifySecurityGroupRuleAttributes"
ACTION_DESCRIBE_SECURITY_GROUP_IPSETS = "DescribeSecurityGroupIPSets"
ACTION_CREATE_SECURITY_GROUP_IPSET = "CreateSecurityGroupIPSet"
ACTION_DELETE_SECURITY_GROUP_IPSETS = "DeleteSecurityGroupIPSets"
ACTION_MODIFY_SECURITY_GROUP_IPSET_ATTRIBUTES = "ModifySecurityGroupIPSetAttributes"
# Private networks (vxnets)
ACTION_DESCRIBE_VXNETS = "DescribeVxnets"
ACTION_CREATE_VXNETS = "CreateVxnets"
ACTION_DELETE_VXNETS = "DeleteVxnets"
ACTION_JOIN_VXNET = "JoinVxnet"
ACTION_LEAVE_VXNET = "LeaveVxnet"
ACTION_MODIFY_VXNET_ATTRIBUTES = "ModifyVxnetAttributes"
ACTION_DESCRIBE_VXNET_INSTANCES = "DescribeVxnetInstances"
# Routers and their static routing entries
ACTION_CREATE_ROUTERS = "CreateRouters"
ACTION_UPDATE_ROUTERS = "UpdateRouters"
ACTION_DELETE_ROUTERS = "DeleteRouters"
ACTION_JOIN_ROUTER = "JoinRouter"
ACTION_LEAVE_ROUTER = "LeaveRouter"
ACTION_POWEROFF_ROUTERS = "PowerOffRouters"
ACTION_POWERON_ROUTERS = "PowerOnRouters"
ACTION_DESCRIBE_ROUTERS = "DescribeRouters"
ACTION_DESCRIBE_ROUTER_VXNETS = "DescribeRouterVxnets"
ACTION_MODIFY_ROUTER_ATTRIBUTES = "ModifyRouterAttributes"
ACTION_MODIFY_ROUTER_STATIC_ATTRIBUTES = "ModifyRouterStaticAttributes"
ACTION_DESCRIBE_ROUTER_STATICS = "DescribeRouterStatics"
ACTION_ADD_ROUTER_STATICS = "AddRouterStatics"
ACTION_DELETE_ROUTER_STATICS = "DeleteRouterStatics"
ACTION_MODIFY_ROUTER_STATIC_ENTRY_ATTRIBUTES = "ModifyRouterStaticEntryAttributes"
ACTION_DESCRIBE_ROUTER_STATIC_ENTRIES = "DescribeRouterStaticEntries"
ACTION_ADD_ROUTER_STATIC_ENTRIES = "AddRouterStaticEntries"
ACTION_DELETE_ROUTER_STATIC_ENTRIES = "DeleteRouterStaticEntries"
# Elastic IPs (eip)
ACTION_ASSOCIATE_EIP = "AssociateEip"
ACTION_DISSOCIATE_EIPS = "DissociateEips"
ACTION_ALLOCATE_EIPS = "AllocateEips"
ACTION_RELEASE_EIPS = "ReleaseEips"
ACTION_DESCRIBE_EIPS = "DescribeEips"
ACTION_MODIFY_EIP_ATTRIBUTES = "ModifyEipAttributes"
ACTION_CHANGE_EIPS_BANDWIDTH = "ChangeEipsBandwidth"
ACTION_CHANGE_EIPS_BILLING_MODE = "ChangeEipsBillingMode"
# DNS aliases
ACTION_DESCRIBE_DNS_ALIASES = "DescribeDNSAliases"
ACTION_ASSOCIATE_DNS_ALIAS = "AssociateDNSAlias"
ACTION_DISSOCIATE_DNS_ALIASES = "DissociateDNSAliases"
ACTION_GET_DNS_LABEL = "GetDNSLabel"
# Load balancers, their listeners, and backend servers
ACTION_DESCRIBE_LOADBALANCERS = "DescribeLoadBalancers"
ACTION_CREATE_LOADBALANCER = "CreateLoadBalancer"
ACTION_DELETE_LOADBALANCERS = "DeleteLoadBalancers"
ACTION_ASSOCIATE_EIPS_TO_LOADBALANCER = "AssociateEipsToLoadBalancer"
ACTION_DISSOCIATE_EIPS_FROM_LOADBALANCER = "DissociateEipsFromLoadBalancer"
ACTION_UPDATE_LOADBALANCERS = "UpdateLoadBalancers"
ACTION_STOP_LOADBALANCERS = "StopLoadBalancers"
ACTION_START_LOADBALANCERS = "StartLoadBalancers"
ACTION_MODIFY_LOADBALANCER_ATTRIBUTES = "ModifyLoadBalancerAttributes"
ACTION_DESCRIBE_LOADBALANCER_LISTENERS = "DescribeLoadBalancerListeners"
ACTION_ADD_LOADBALANCER_LISTENERS = "AddLoadBalancerListeners"
ACTION_DELETE_LOADBALANCER_LISTENERS = "DeleteLoadBalancerListeners"
ACTION_MODIFY_LOADBALANCER_LISTENER_ATTRIBUTES = "ModifyLoadBalancerListenerAttributes"
ACTION_ADD_LOADBALANCER_BACKENDS = "AddLoadBalancerBackends"
ACTION_DELETE_LOADBALANCER_BACKENDS = "DeleteLoadBalancerBackends"
ACTION_MODIFY_LOADBALANCER_BACKEND_ATTRIBUTES = "ModifyLoadBalancerBackendAttributes"
ACTION_DESCRIBE_LOADBALANCER_BACKENDS = "DescribeLoadBalancerBackends"
# Monitoring data
ACTION_GET_MONITOR = "GetMonitor"
ACTION_GET_LOADBALANCER_MONITOR = "GetLoadBalancerMonitor"
# Volume snapshots
ACTION_CREATE_SNAPSHOTS = "CreateSnapshots"
ACTION_DELETE_SNAPSHOTS = "DeleteSnapshots"
ACTION_APPLY_SNAPSHOTS = "ApplySnapshots"
ACTION_DESCRIBE_SNAPSHOTS = "DescribeSnapshots"
ACTION_MODIFY_SNAPSHOT_ATTRIBUTES = "ModifySnapshotAttributes"
ACTION_CAPTURE_INSTANCE_FROM_SNAPSHOT = "CaptureInstanceFromSnapshot"
ACTION_CREATE_VOLUME_FROM_SNAPSHOT = "CreateVolumeFromSnapshot"
# Relational database (RDB) service
ACTION_DESCRIBE_RDBS = "DescribeRDBs"
ACTION_CREATE_RDB = "CreateRDB"
ACTION_RESIZE_RDBS = "ResizeRDBs"
ACTION_START_RDBS = "StartRDBs"
ACTION_STOP_RDBS = "StopRDBs"
# MongoDB service
ACTION_DESCRIBE_MONGOS = "DescribeMongos"
ACTION_RESIZE_MONGOS = "ResizeMongos"
ACTION_START_MONGOS = "StartMongos"
ACTION_STOP_MONGOS = "StopMongos"
# Cache service (e.g. Redis/Memcached clusters)
ACTION_DESCRIBE_CACHES = "DescribeCaches"
ACTION_CREATE_CACHE = "CreateCache"
ACTION_RESIZE_CACHES = "ResizeCaches"
ACTION_START_CACHES = "StartCaches"
ACTION_STOP_CACHES = "StopCaches"
# Spark clusters
ACTION_DESCRIBE_SPARKS = "DescribeSparks"
ACTION_START_SPARKS = "StartSparks"
ACTION_STOP_SPARKS = "StopSparks"
ACTION_ADD_SPARK_NODES = "AddSparkNodes"
ACTION_DELETE_SPARK_NODES = "DeleteSparkNodes"
ACTION_CREATE_SPARK = "CreateSpark"
ACTION_DELETE_SPARKS = "DeleteSparks"
# Hadoop clusters
ACTION_DESCRIBE_HADOOPS = "DescribeHadoops"
ACTION_START_HADOOPS = "StartHadoops"
ACTION_STOP_HADOOPS = "StopHadoops"
ACTION_ADD_HADOOP_NODES = "AddHadoopNodes"
ACTION_DELETE_HADOOP_NODES = "DeleteHadoopNodes"
ACTION_CREATE_HADOOP = "CreateHadoop"
ACTION_DELETE_HADOOPS = "DeleteHadoops"
# ZooKeeper clusters
ACTION_DESCRIBE_ZOOKEEPERS = "DescribeZookeepers"
ACTION_START_ZOOKEEPERS = "StartZookeepers"
ACTION_STOP_ZOOKEEPERS = "StopZookeepers"
# Message queues
ACTION_DESCRIBE_QUEUES = "DescribeQueues"
ACTION_START_QUEUES = "StartQueues"
ACTION_STOP_QUEUES = "StopQueues"
# Resource tags
ACTION_DESCRIBE_TAGS = "DescribeTags"
ACTION_CREATE_TAG = "CreateTag"
ACTION_DELETE_TAGS = "DeleteTags"
ACTION_MODIFY_TAG_ATTRIBUTES = "ModifyTagAttributes"
ACTION_ATTACH_TAGS = "AttachTags"
ACTION_DETACH_TAGS = "DetachTags"
########## Constants for resource ##########
# Security-group rule direction: 0 = ingress (inbound), 1 = egress (outbound).
DIRECTION_EGRESS = 1
DIRECTION_INGRESS = 0
# Vxnet management type: 1 = managed by the platform, 0 = self-managed.
VXNET_TYPE_MANAGED = 1
VXNET_TYPE_UNMANAGED = 0
# Load-balancer balance modes.
BALANCE_ROUNDROBIN = "roundrobin"
BALANCE_LEASTCONN = "leastconn"
# Listener header-insertion options; values 1/2/4 look like combinable
# bit flags — confirm against the API documentation before OR-ing them.
HEADER_X_FORWARD_FOR = 1
HEADER_QC_LBID = 2
HEADER_QC_LBIP = 4
# Load-balancer capacity types, named by maximum concurrent connections.
LB_TYPE_MAXCONN_5k = 0
LB_TYPE_MAXCONN_20k = 1
LB_TYPE_MAXCONN_40k = 2
LB_TYPE_MAXCONN_100k = 3
LB_TYPE_MAXCONN_200k = 4
LB_TYPE_MAXCONN_500k = 5
# Elastic-IP billing modes: pay by reserved bandwidth or by traffic volume.
EIP_BILLING_MODE_BANDWIDTH = "bandwidth"
EIP_BILLING_MODE_TRAFFIC = "traffic"
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@test_util.run_v1_only("RandomShuffleQueue removed from v2")
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
    """Log the starting test and isolate the device stack per thread."""
    # Useful for debugging when a test times out.
    super(RandomShuffleQueueTest, self).setUp()
    tf_logging.error("Starting: %s", self._testMethodName)
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    ops.get_default_graph().switch_to_thread_local()
def tearDown(self):
    """Log test completion (pairs with the "Starting" log from setUp)."""
    super(RandomShuffleQueueTest, self).tearDown()
    tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
    """A single enqueue grows the queue size from 0 to 1."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
        push_op = queue.enqueue((10.0,))
        # Nothing has run yet, so the queue is still empty.
        self.assertAllEqual(0, queue.size())
        push_op.run()
        self.assertAllEqual(1, queue.size())
def testEnqueueWithShape(self):
    """Enqueues matching the declared [3, 2] shape succeed; others raise."""
    with self.cached_session():
        shaped_queue = data_flow_ops.RandomShuffleQueue(
            10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
        good_op = shaped_queue.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
        good_op.run()
        self.assertAllEqual(1, shaped_queue.size())
        # A [2, 3] element violates the declared shape at graph-build time.
        with self.assertRaises(ValueError):
            shaped_queue.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
    """enqueue_many accepts batches consistent with the per-element shapes."""
    with self.cached_session():
        pair_queue = data_flow_ops.RandomShuffleQueue(
            10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
        pair_queue.enqueue_many(
            [[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
        self.assertAllEqual(4, pair_queue.size())

        vector_queue = data_flow_ops.RandomShuffleQueue(
            10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
        # Merely constructing these ops must not raise a shape error.
        vector_queue.enqueue(([1, 2, 3],))
        vector_queue.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
    """Mixed scalar/vector components survive enqueue_many plus enqueue."""
    with self.cached_session() as sess:
        mixed_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
        mixed_queue.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
        mixed_queue.enqueue([9, [10]]).run()
        pop = mixed_queue.dequeue()

        collected = []
        # Two single dequeues ...
        for _ in range(2):
            scalar, vector = self.evaluate(pop)
            collected.append((scalar, vector))
        # ... then one batched dequeue of the remaining three elements.
        scalar, vector = self.evaluate(mixed_queue.dequeue_many(3))
        for idx in range(3):
            collected.append((scalar[idx], vector[idx]))
        self.assertItemsEqual(
            [(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])], collected)
def testParallelEnqueue(self):
    """Enqueue from one thread per element; dequeue all from one thread."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
        enqueue_ops = [q.enqueue((x,)) for x in elems]
        dequeued_t = q.dequeue()

        # Run one producer thread for each element in elems.
        def enqueue(enqueue_op):
            self.evaluate(enqueue_op)

        threads = [
            self.checkedThread(
                target=enqueue, args=(e,)) for e in enqueue_ops
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # Dequeue every element using a single thread.
        results = []
        for _ in xrange(len(elems)):
            results.append(dequeued_t.eval())
        # Dequeue order is not deterministic, so compare as multisets.
        self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
    """Enqueue from one thread; dequeue one element in each of ten threads."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
        enqueue_ops = [q.enqueue((x,)) for x in elems]
        dequeued_t = q.dequeue()

        # Enqueue every element using a single thread.
        for enqueue_op in enqueue_ops:
            enqueue_op.run()

        # Run one consumer thread for each element in elems.
        results = []

        def dequeue():
            # list.append from several threads is safe under the GIL.
            results.append(self.evaluate(dequeued_t))

        threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # Dequeue order is not deterministic, so compare as multisets.
        self.assertItemsEqual(elems, results)
def testDequeue(self):
    """Everything enqueued can be dequeued, in some shuffled order."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        values = [10.0, 20.0, 30.0]
        push_ops = [queue.enqueue((v,)) for v in values]
        pop = queue.dequeue()

        for push_op in push_ops:
            push_op.run()

        popped = [pop.eval() for _ in xrange(len(values))]
        self.assertItemsEqual(values, popped)
def testEnqueueAndBlockingDequeue(self):
    """A dequeue on an empty queue blocks until producers supply elements."""
    with self.cached_session() as sess:
        # Capacity 3, min_after_dequeue 0.
        q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
        elems = [10.0, 20.0, 30.0]
        enqueue_ops = [q.enqueue((x,)) for x in elems]
        dequeued_t = q.dequeue()

        def enqueue():
            # The enqueue_ops should run after the dequeue op has blocked.
            # TODO(mrry): Figure out how to do this without sleeping.
            time.sleep(0.1)
            for enqueue_op in enqueue_ops:
                self.evaluate(enqueue_op)

        results = []

        def dequeue():
            for _ in xrange(len(elems)):
                results.append(self.evaluate(dequeued_t))

        enqueue_thread = self.checkedThread(target=enqueue)
        dequeue_thread = self.checkedThread(target=dequeue)
        enqueue_thread.start()
        dequeue_thread.start()
        enqueue_thread.join()
        dequeue_thread.join()

        # Dequeue order is not deterministic, so compare as multisets.
        self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
    """Two-component tuples round-trip through the queue intact."""
    with self.cached_session() as sess:
        tuple_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, (dtypes_lib.int32, dtypes_lib.float32))
        pairs = [(5, 10.0), (10, 20.0), (15, 30.0)]
        push_ops = [tuple_queue.enqueue((a, b)) for a, b in pairs]
        pop = tuple_queue.dequeue()

        for push_op in push_ops:
            push_op.run()

        seen = []
        for _ in xrange(len(pairs)):
            left, right = self.evaluate(pop)
            seen.append((left, right))
        self.assertItemsEqual(pairs, seen)
def testQueueSizeEmpty(self):
    """A freshly constructed queue reports a size of zero."""
    with self.cached_session():
        fresh_queue = data_flow_ops.RandomShuffleQueue(
            10, 5, dtypes_lib.float32)
        self.assertEqual(0, fresh_queue.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
    """size() is a scalar op that tracks enqueue/dequeue activity."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        push_op = queue.enqueue((10.0,))
        pop = queue.dequeue()
        size_op = queue.size()
        # The size tensor is a scalar.
        self.assertEqual([], size_op.get_shape())

        push_op.run()
        self.assertEqual([1], self.evaluate(size_op))
        pop.op.run()
        self.assertEqual([0], self.evaluate(size_op))
def testEnqueueMany(self):
    """Running the same enqueue_many twice enqueues the batch twice."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        batch = [10.0, 20.0, 30.0, 40.0]
        push_batch = queue.enqueue_many((batch,))
        pop = queue.dequeue()

        push_batch.run()
        push_batch.run()

        popped = [pop.eval() for _ in range(8)]
        self.assertItemsEqual(batch + batch, popped)
def testEmptyEnqueueMany(self):
    """Enqueueing a zero-length batch leaves the queue size unchanged."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
        zero_batch = constant_op.constant(
            [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
        push_nothing = queue.enqueue_many((zero_batch,))
        size_op = queue.size()

        self.assertEqual(0, self.evaluate(size_op))
        push_nothing.run()
        self.assertEqual(0, self.evaluate(size_op))
def testEmptyDequeueMany(self):
    """dequeue_many(0) yields an empty result whether or not data exists."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.float32, shapes=())
        push_op = queue.enqueue((10.0,))
        pop_zero = queue.dequeue_many(0)

        self.assertEqual([], self.evaluate(pop_zero).tolist())
        push_op.run()
        self.assertEqual([], self.evaluate(pop_zero).tolist())
def testEmptyDequeueUpTo(self):
    """dequeue_up_to(0) yields an empty result whether or not data exists."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.float32, shapes=())
        push_op = queue.enqueue((10.0,))
        pop_zero = queue.dequeue_up_to(0)

        self.assertEqual([], self.evaluate(pop_zero).tolist())
        push_op.run()
        self.assertEqual([], self.evaluate(pop_zero).tolist())
def testEmptyDequeueManyWithNoShape(self):
    """dequeue_many requires fully specified component shapes, always."""
    with self.cached_session():
        unshaped_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.float32)
        push_op = unshaped_queue.enqueue(
            (constant_op.constant([10.0, 20.0], shape=(1, 2)),))
        pop_zero = unshaped_queue.dequeue_many(0)

        # Expect the operation to fail due to the shape not being constrained.
        with self.assertRaisesOpError(
            "require the components to have specified shapes"):
            self.evaluate(pop_zero)

        push_op.run()

        # RandomShuffleQueue does not make any attempt to support DequeueMany
        # with unspecified shapes, even if a shape could be inferred from the
        # elements enqueued.
        with self.assertRaisesOpError(
            "require the components to have specified shapes"):
            self.evaluate(pop_zero)
def testEmptyDequeueUpToWithNoShape(self):
    """dequeue_up_to requires fully specified component shapes, always."""
    with self.cached_session():
        unshaped_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.float32)
        push_op = unshaped_queue.enqueue(
            (constant_op.constant([10.0, 20.0], shape=(1, 2)),))
        pop_zero = unshaped_queue.dequeue_up_to(0)

        # Expect the operation to fail due to the shape not being constrained.
        with self.assertRaisesOpError(
            "require the components to have specified shapes"):
            self.evaluate(pop_zero)

        push_op.run()

        # RandomShuffleQueue does not make any attempt to support DequeueUpTo
        # with unspecified shapes, even if a shape could be inferred from the
        # elements enqueued.
        with self.assertRaisesOpError(
            "require the components to have specified shapes"):
            self.evaluate(pop_zero)
def testMultiEnqueueMany(self):
    """Batched enqueue of two components round-trips element-wise."""
    with self.cached_session() as sess:
        tuple_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, (dtypes_lib.float32, dtypes_lib.int32))
        float_elems = [10.0, 20.0, 30.0, 40.0]
        int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
        push_batch = tuple_queue.enqueue_many((float_elems, int_elems))
        pop = tuple_queue.dequeue()

        push_batch.run()
        push_batch.run()

        seen = []
        for _ in range(8):
            f_val, i_val = self.evaluate(pop)
            seen.append((f_val, [i_val[0], i_val[1]]))

        expected = list(zip(float_elems, int_elems)) * 2
        self.assertItemsEqual(expected, seen)
def testDequeueMany(self):
    """Two dequeue_many(5) calls drain a ten-element queue."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.float32, ((),))
        values = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
        push_batch = queue.enqueue_many((values,))
        pop_five = queue.dequeue_many(5)

        push_batch.run()

        drained = self.evaluate(pop_five).tolist()
        drained.extend(pop_five.eval())
        self.assertItemsEqual(values, drained)
def testDequeueUpToNoBlocking(self):
    """When enough elements exist, dequeue_up_to behaves like dequeue_many."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.float32, ((),))
        values = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
        push_batch = queue.enqueue_many((values,))
        pop_up_to_five = queue.dequeue_up_to(5)

        push_batch.run()

        drained = self.evaluate(pop_up_to_five).tolist()
        drained.extend(pop_up_to_five.eval())
        self.assertItemsEqual(values, drained)
def testMultiDequeueMany(self):
    """dequeue_many on a two-component queue returns statically shaped batches."""
    with self.cached_session() as sess:
        tuple_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
        float_elems = [
            10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
        ]
        int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
                     [13, 14], [15, 16], [17, 18], [19, 20]]
        push_batch = tuple_queue.enqueue_many((float_elems, int_elems))
        batch_of_four = tuple_queue.dequeue_many(4)
        single = tuple_queue.dequeue()

        push_batch.run()

        seen = []
        # Two batches of four; the runtime shape matches the static shape.
        floats, ints = self.evaluate(batch_of_four)
        self.assertEqual(floats.shape, batch_of_four[0].get_shape())
        self.assertEqual(ints.shape, batch_of_four[1].get_shape())
        seen.extend(zip(floats, ints.tolist()))
        floats, ints = self.evaluate(batch_of_four)
        seen.extend(zip(floats, ints.tolist()))
        # Then the final two elements, one at a time.
        floats, ints = self.evaluate(single)
        self.assertEqual(floats.shape, single[0].get_shape())
        self.assertEqual(ints.shape, single[1].get_shape())
        seen.append((floats, ints.tolist()))
        floats, ints = self.evaluate(single)
        seen.append((floats, ints.tolist()))

        self.assertItemsEqual(zip(float_elems, int_elems), seen)
def testMultiDequeueUpToNoBlocking(self):
    """dequeue_up_to returns correct batches but with undefined static shape."""
    with self.cached_session() as sess:
        tuple_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
        float_elems = [
            10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
        ]
        int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
                     [13, 14], [15, 16], [17, 18], [19, 20]]
        push_batch = tuple_queue.enqueue_many((float_elems, int_elems))
        up_to_four = tuple_queue.dequeue_up_to(4)
        single = tuple_queue.dequeue()

        push_batch.run()

        seen = []
        floats, ints = self.evaluate(up_to_four)
        # dequeue_up_to has undefined shape.
        self.assertEqual([None], up_to_four[0].get_shape().as_list())
        self.assertEqual([None, 2], up_to_four[1].get_shape().as_list())
        seen.extend(zip(floats, ints.tolist()))
        floats, ints = self.evaluate(up_to_four)
        seen.extend(zip(floats, ints.tolist()))
        # The final two elements, one at a time, have static shapes again.
        floats, ints = self.evaluate(single)
        self.assertEqual(floats.shape, single[0].get_shape())
        self.assertEqual(ints.shape, single[1].get_shape())
        seen.append((floats, ints.tolist()))
        floats, ints = self.evaluate(single)
        seen.append((floats, ints.tolist()))

        self.assertItemsEqual(zip(float_elems, int_elems), seen)
def testHighDimension(self):
    """Rank-4 elements survive a batched round trip through the queue."""
    with self.cached_session():
        hyper_queue = data_flow_ops.RandomShuffleQueue(
            10, 0, dtypes_lib.int32, ((4, 4, 4, 4)))
        # Ten distinct constant-filled 4x4x4x4 blocks.
        blocks = np.array(
            [[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
        push_batch = hyper_queue.enqueue_many((blocks,))
        pop_all = hyper_queue.dequeue_many(10)

        push_batch.run()
        self.assertItemsEqual(pop_all.eval().tolist(), blocks.tolist())
def testParallelEnqueueMany(self):
    """Ten threads each enqueue the same 100-element batch; all 1000 arrive."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(
            1000, 0, dtypes_lib.float32, shapes=())
        elems = [10.0 * x for x in range(100)]
        enqueue_op = q.enqueue_many((elems,))
        dequeued_t = q.dequeue_many(1000)

        # Enqueue 100 items in parallel on 10 threads.
        def enqueue():
            self.evaluate(enqueue_op)

        threads = [self.checkedThread(target=enqueue) for _ in range(10)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # Each batch was enqueued 10 times, so expect 10 copies of each value.
        self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
    """Ten threads each dequeue_many(100); together they drain 1000 elements."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(
            1000, 0, dtypes_lib.float32, shapes=())
        elems = [10.0 * x for x in range(1000)]
        enqueue_op = q.enqueue_many((elems,))
        dequeued_t = q.dequeue_many(100)
        enqueue_op.run()

        # Dequeue 100 items in parallel on 10 threads.
        dequeued_elems = []

        def dequeue():
            # list.extend from several threads is safe under the GIL.
            dequeued_elems.extend(self.evaluate(dequeued_t))

        threads = [self.checkedThread(target=dequeue) for _ in range(10)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
    """Ten threads each dequeue_up_to(100); together they drain 1000 elements."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(
            1000, 0, dtypes_lib.float32, shapes=())
        elems = [10.0 * x for x in range(1000)]
        enqueue_op = q.enqueue_many((elems,))
        dequeued_t = q.dequeue_up_to(100)
        enqueue_op.run()

        # Dequeue 100 items in parallel on 10 threads.
        dequeued_elems = []

        def dequeue():
            # list.extend from several threads is safe under the GIL.
            dequeued_elems.extend(self.evaluate(dequeued_t))

        threads = [self.checkedThread(target=dequeue) for _ in range(10)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
    """Threads dequeue randomly sized partitions that exactly cover the queue."""
    with self.cached_session() as sess:
        # Random partition sizes whose sum is the queue capacity.
        dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
        total_elements = sum(dequeue_sizes)
        q = data_flow_ops.RandomShuffleQueue(
            total_elements, 0, dtypes_lib.float32, shapes=())

        elems = [10.0 * x for x in xrange(total_elements)]
        enqueue_op = q.enqueue_many((elems,))
        dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
        enqueue_op.run()

        # Dequeue random number of items in parallel on 10 threads.
        dequeued_elems = []

        def dequeue(dequeue_op):
            # list.extend from several threads is safe under the GIL.
            dequeued_elems.extend(self.evaluate(dequeue_op))

        threads = []
        for dequeue_op in dequeue_ops:
            threads.append(
                self.checkedThread(target=dequeue, args=(dequeue_op,)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
    """dequeue_many blocks on an empty queue until the batch is available."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
        elems = [10.0, 20.0, 30.0, 40.0]
        enqueue_op = q.enqueue_many((elems,))
        dequeued_t = q.dequeue_many(4)

        dequeued_elems = []

        def enqueue():
            # The enqueue_op should run after the dequeue op has blocked.
            # TODO(mrry): Figure out how to do this without sleeping.
            time.sleep(0.1)
            self.evaluate(enqueue_op)

        def dequeue():
            dequeued_elems.extend(self.evaluate(dequeued_t).tolist())

        enqueue_thread = self.checkedThread(target=enqueue)
        dequeue_thread = self.checkedThread(target=dequeue)
        enqueue_thread.start()
        dequeue_thread.start()
        enqueue_thread.join()
        dequeue_thread.join()

        self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
    """dequeue_up_to blocks on an empty queue until elements are available."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
        elems = [10.0, 20.0, 30.0, 40.0]
        enqueue_op = q.enqueue_many((elems,))
        dequeued_t = q.dequeue_up_to(4)

        dequeued_elems = []

        def enqueue():
            # The enqueue_op should run after the dequeue op has blocked.
            # TODO(mrry): Figure out how to do this without sleeping.
            time.sleep(0.1)
            self.evaluate(enqueue_op)

        def dequeue():
            dequeued_elems.extend(self.evaluate(dequeued_t).tolist())

        enqueue_thread = self.checkedThread(target=enqueue)
        dequeue_thread = self.checkedThread(target=dequeue)
        enqueue_thread.start()
        dequeue_thread.start()
        enqueue_thread.join()
        dequeue_thread.join()

        self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
    """The batch size of dequeue_many may itself be dequeued from a queue."""
    with self.cached_session():
        # Define a first queue that contains integer counts.
        batch_sizes = [random.randint(1, 10) for _ in range(100)]
        sizes_queue = data_flow_ops.RandomShuffleQueue(
            100, 0, dtypes_lib.int32)
        push_sizes = sizes_queue.enqueue_many((batch_sizes,))
        total = sum(batch_sizes)

        # Define a second queue that contains `total` data elements.
        data = [random.randint(0, 100) for _ in range(total)]
        data_queue = data_flow_ops.RandomShuffleQueue(
            total, 0, dtypes_lib.int32, ((),))
        push_data = data_queue.enqueue_many((data,))

        # Each evaluation dequeues one count, then that many data elements.
        pop_batch = data_queue.dequeue_many(sizes_queue.dequeue())

        push_sizes.run()
        push_data.run()

        drained = []
        for _ in batch_sizes:
            drained.extend(pop_batch.eval())
        self.assertItemsEqual(data, drained)
def testDequeueUpToWithTensorParameter(self):
    """The batch size of dequeue_up_to may itself be dequeued from a queue."""
    with self.cached_session():
        # Define a first queue that contains integer counts.
        batch_sizes = [random.randint(1, 10) for _ in range(100)]
        sizes_queue = data_flow_ops.RandomShuffleQueue(
            100, 0, dtypes_lib.int32)
        push_sizes = sizes_queue.enqueue_many((batch_sizes,))
        total = sum(batch_sizes)

        # Define a second queue that contains `total` data elements.
        data = [random.randint(0, 100) for _ in range(total)]
        data_queue = data_flow_ops.RandomShuffleQueue(
            total, 0, dtypes_lib.int32, ((),))
        push_data = data_queue.enqueue_many((data,))

        # Each evaluation dequeues one count, then up to that many elements.
        pop_batch = data_queue.dequeue_up_to(sizes_queue.dequeue())

        push_sizes.run()
        push_data.run()

        drained = []
        for _ in batch_sizes:
            drained.extend(pop_batch.eval())
        self.assertItemsEqual(data, drained)
def testDequeueFromClosedQueue(self):
    """A closed queue keeps serving until drained, then raises OutOfRange."""
    with self.cached_session():
        queue = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
        values = [10.0, 20.0, 30.0, 40.0]
        push_batch = queue.enqueue_many((values,))
        close_queue = queue.close()
        pop = queue.dequeue()

        push_batch.run()
        close_queue.run()

        popped = [pop.eval() for _ in values]
        expected = [[value] for value in values]
        self.assertItemsEqual(expected, popped)

        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
                                    "is closed and has insufficient"):
            self.evaluate(pop)
def testBlockingDequeueFromClosedQueue(self):
    """Closing the queue lifts the min_after_dequeue blocking requirement."""
    with self.cached_session() as sess:
        min_size = 2
        q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
        elems = [10.0, 20.0, 30.0, 40.0]
        enqueue_op = q.enqueue_many((elems,))
        close_op = q.close()
        dequeued_t = q.dequeue()
        enqueue_op.run()

        results = []
        # Manually dequeue until we hit min_size.
        results.append(self.evaluate(dequeued_t))
        results.append(self.evaluate(dequeued_t))

        def blocking_dequeue():
            # These two dequeues block until close_op runs below.
            results.append(self.evaluate(dequeued_t))
            results.append(self.evaluate(dequeued_t))
            self.assertItemsEqual(elems, results)
            # Expect the operation to fail due to the queue being closed.
            with self.assertRaisesRegex(errors_impl.OutOfRangeError,
                                        "is closed and has insufficient"):
                self.evaluate(dequeued_t)

        dequeue_thread = self.checkedThread(target=blocking_dequeue)
        dequeue_thread.start()
        time.sleep(0.1)
        # The dequeue thread blocked when it hit the min_size requirement.
        self.assertEqual(len(results), 2)
        close_op.run()
        dequeue_thread.join()
        # Once the queue is closed, the min_size requirement is lifted.
        self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
    """A blocked dequeue on an empty queue fails once the queue is closed."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        close_op = q.close()
        dequeued_t = q.dequeue()
        finished = []  # Needs to be a mutable type

        def dequeue():
            # Expect the operation to fail due to the queue being closed.
            with self.assertRaisesRegex(errors_impl.OutOfRangeError,
                                        "is closed and has insufficient"):
                self.evaluate(dequeued_t)
            finished.append(True)

        dequeue_thread = self.checkedThread(target=dequeue)
        dequeue_thread.start()
        # The close_op should run after the dequeue_thread has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        # Still blocked: the dequeue has not raised yet.
        self.assertEqual(len(finished), 0)
        close_op.run()
        dequeue_thread.join()
        self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
    """A second dequeue_many on a drained, closed queue raises OutOfRange."""
    with self.cached_session() as sess:
        q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
        elems = [10.0, 20.0, 30.0, 40.0]
        enqueue_op = q.enqueue_many((elems,))
        close_op = q.close()
        dequeued_t = q.dequeue_many(4)
        enqueue_op.run()

        progress = []  # Must be mutable

        def dequeue():
            # First batch succeeds and drains the queue.
            self.assertItemsEqual(elems, self.evaluate(dequeued_t))
            progress.append(1)
            # Expect the operation to fail due to the queue being closed.
            with self.assertRaisesRegex(errors_impl.OutOfRangeError,
                                        "is closed and has insufficient"):
                self.evaluate(dequeued_t)
            progress.append(2)

        self.assertEqual(len(progress), 0)
        dequeue_thread = self.checkedThread(target=dequeue)
        dequeue_thread.start()
        # The close_op should run after the dequeue_thread has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        # Poll until the first batch has been dequeued.
        for _ in range(100):
            time.sleep(0.01)
            if len(progress) == 1:
                break
        self.assertEqual(len(progress), 1)
        time.sleep(0.01)
        close_op.run()
        dequeue_thread.join()
        self.assertEqual(len(progress), 2)
  def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
    """After close, a blocked dequeue_up_to returns the leftover elements.

    dequeue_up_to(3) first yields a batch of 3 of the 4 elements; once the
    queue is closed the second call yields the single remaining element
    instead of raising.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(3)

      enqueue_op.run()

      results = []

      def dequeue():
        results.extend(self.evaluate(dequeued_t))
        self.assertEqual(3, len(results))
        # The second call blocks until close, then returns the remainder.
        results.extend(self.evaluate(dequeued_t))
        self.assertEqual(4, len(results))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
      self.assertItemsEqual(results, elems)
  def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
    """Closing lifts the min_after_dequeue constraint for dequeue_up_to.

    With min_after_dequeue=2 and 4 elements, the first dequeue_up_to(3)
    returns 3 elements; after close, the second call returns the last
    remaining element even though that takes the queue below the minimum.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(
          capacity=10,
          min_after_dequeue=2,
          dtypes=dtypes_lib.float32,
          shapes=((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(3)

      enqueue_op.run()

      results = []

      def dequeue():
        results.extend(self.evaluate(dequeued_t))
        self.assertEqual(3, len(results))
        # min_after_dequeue is 2, we ask for 3 elements, and we end up only
        # getting the remaining 1.
        results.extend(self.evaluate(dequeued_t))
        self.assertEqual(4, len(results))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
      self.assertItemsEqual(results, elems)
  def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
    """A failed dequeue_many returns its reserved elements to the queue.

    After dequeue_many(3) succeeds once and then fails on close (only one
    element left), the leftover element must still be retrievable via a
    cleanup dequeue of q.size().
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue_many(q.size())

      enqueue_op.run()

      results = []

      def dequeue():
        results.extend(self.evaluate(dequeued_t))
        self.assertEqual(len(results), 3)
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
                                    "is closed and has insufficient"):
          self.evaluate(dequeued_t)
        # While the last dequeue failed, we want to insure that it returns
        # any elements that it potentially reserved to dequeue. Thus the
        # next cleanup should return a single element.
        results.extend(self.evaluate(cleanup_dequeue_t))

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
      self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """An enqueue blocked on a full queue completes once space is freed.

    The extra element (50.0) can only enter the queue after a dequeue has
    made room, so it cannot be the first element dequeued.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        self.evaluate(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      results = []
      for _ in elems:
        results.append(dequeued_t.eval())
      results.append(dequeued_t.eval())
      self.assertItemsEqual(elems + [50.0], results)
      # There wasn't room for 50.0 in the queue when the first element was
      # dequeued.
      self.assertNotEqual(50.0, results[0])
      thread.join()
  def testBlockingEnqueueManyToFullQueue(self):
    """A blocked enqueue_many feeds elements in as dequeues make room.

    50.0 and 60.0 can only enter after space is freed, so neither can be
    the first dequeued element, and 60.0 cannot be among the first two.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        self.evaluate(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      results = []
      for _ in elems:
        time.sleep(0.01)
        results.append(dequeued_t.eval())
      results.append(dequeued_t.eval())
      results.append(dequeued_t.eval())
      self.assertItemsEqual(elems + [50.0, 60.0], results)
      # There wasn't room for 50.0 or 60.0 in the queue when the first
      # element was dequeued.
      self.assertNotEqual(50.0, results[0])
      self.assertNotEqual(60.0, results[0])
      # Similarly for 60.0 and the second element.
      self.assertNotEqual(60.0, results[1])
      thread.join()
  def testBlockingEnqueueToClosedQueue(self):
    """close() waits for a pending enqueue; later enqueues are cancelled.

    Thread 1 blocks enqueueing into a full queue; thread 2 blocks closing
    it. A single dequeue on the main thread unblocks the enqueue, which
    lets the close finish; the second enqueue then fails as cancelled.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()
      close_op = q.close()

      enqueue_op.run()

      def blocking_enqueue():
        # Expect the operation to succeed since it will complete
        # before the queue is closed.
        self.evaluate(blocking_enqueue_op)

        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
          self.evaluate(blocking_enqueue_op)

      thread1 = self.checkedThread(target=blocking_enqueue)
      thread1.start()

      # The close_op should run after the first blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def blocking_close():
        self.evaluate(close_op)

      thread2 = self.checkedThread(target=blocking_close)
      thread2.start()

      # Wait for the close op to block before unblocking the enqueue.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      results = []
      # Dequeue to unblock the first blocking_enqueue_op, after which the
      # close will complete.
      results.append(dequeued_t.eval())
      self.assertTrue(results[0] in elems)

      thread2.join()
      thread1.join()
  def testBlockingEnqueueManyToClosedQueue(self):
    """A partially-applied enqueue_many finishes before close completes.

    The batch enqueue places one of its two elements (size reaches the
    capacity of 4) and blocks; a dequeue frees room for the second, the
    close then completes, and re-running the enqueue fails as cancelled.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      size_t = q.size()

      enqueue_op.run()
      self.assertEqual(size_t.eval(), 3)

      def blocking_enqueue():
        # This will block until the dequeue after the close.
        self.evaluate(blocking_enqueue_op)

      thread1 = self.checkedThread(target=blocking_enqueue)
      thread1.start()

      # First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeue.
      # Poll the queue size rather than sleeping a fixed amount.
      for i in range(50):
        queue_size = self.evaluate(size_t)
        if queue_size == 4:
          break
        elif i == 49:
          self.fail(
              "Blocking enqueue op did not execute within the expected time.")
        time.sleep(0.1)

      def blocking_close():
        self.evaluate(close_op)

      thread2 = self.checkedThread(target=blocking_close)
      thread2.start()

      # Unblock the first blocking_enqueue_op in blocking_enqueue.
      q.dequeue().eval()

      thread2.join()
      thread1.join()

      # At this point the close operation will complete, so the next enqueue
      # will fail.
      with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
        self.evaluate(blocking_enqueue_op)
  def testSharedQueueSameSession(self):
    """Two queue handles with the same shared_name act on one queue.

    Elements enqueued through either handle are visible (size and dequeue)
    through the other.
    """
    with self.cached_session():
      q1 = data_flow_ops.RandomShuffleQueue(
          1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
      q1.enqueue((10.0,)).run()

      # TensorFlow TestCase adds a default graph seed (=87654321). We check if
      # the seed computed from the default graph seed is reproduced.
      seed = 887634792
      q2 = data_flow_ops.RandomShuffleQueue(
          1,
          0,
          dtypes_lib.float32, ((),),
          shared_name="shared_queue",
          seed=seed)

      q1_size_t = q1.size()
      q2_size_t = q2.size()

      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)

      self.assertEqual(q2.dequeue().eval(), 10.0)

      self.assertEqual(q1_size_t.eval(), 0)
      self.assertEqual(q2_size_t.eval(), 0)

      q2.enqueue((20.0,)).run()

      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)

      self.assertEqual(q1.dequeue().eval(), 20.0)

      self.assertEqual(q1_size_t.eval(), 0)
      self.assertEqual(q2_size_t.eval(), 0)
  def testSharedQueueSameSessionGraphSeedNone(self):
    """A shared queue opened without a seed accepts the creator's seed."""
    with self.cached_session():
      q1 = data_flow_ops.RandomShuffleQueue(
          1,
          0,
          dtypes_lib.float32, ((),),
          shared_name="shared_queue",
          seed=98765432)
      q1.enqueue((10.0,)).run()

      # If both graph and op seeds are not provided, the default value must be
      # used, and in case a shared queue is already created, the second queue op
      # must accept any previous seed value.
      random_seed.set_random_seed(None)
      q2 = data_flow_ops.RandomShuffleQueue(
          1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")

      q1_size_t = q1.size()
      q2_size_t = q2.size()

      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)
  def testIncompatibleSharedQueueErrors(self):
    """Opening a shared queue with mismatched attributes raises an op error.

    Each pair (q_a … q_h) creates a shared queue and then a second handle
    whose capacity / min_after_dequeue / dtypes / shapes / seed disagrees;
    running the second handle's op must fail with a message naming the
    mismatched attribute.
    """
    with self.cached_session():
      # Mismatched capacity.
      q_a_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_a")
      q_a_2 = data_flow_ops.RandomShuffleQueue(
          15, 5, dtypes_lib.float32, shared_name="q_a")
      q_a_1.queue_ref.op.run()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.op.run()

      # Mismatched min_after_dequeue.
      q_b_1 = data_flow_ops.RandomShuffleQueue(
          10, 0, dtypes_lib.float32, shared_name="q_b")
      q_b_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_b")
      q_b_1.queue_ref.op.run()
      with self.assertRaisesOpError("min_after_dequeue"):
        q_b_2.queue_ref.op.run()

      # Mismatched component dtype.
      q_c_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_c")
      q_c_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.int32, shared_name="q_c")
      q_c_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_c_2.queue_ref.op.run()

      # Shapes added on the second handle only.
      q_d_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_d")
      q_d_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.op.run()

      # Shapes dropped on the second handle.
      q_e_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_e")
      q_e_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.op.run()

      # Different shapes on each handle.
      q_f_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
      q_f_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
      q_f_1.queue_ref.op.run()
      with self.assertRaisesOpError("component shapes"):
        q_f_2.queue_ref.op.run()

      # Different number of components.
      q_g_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, shared_name="q_g")
      q_g_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
      q_g_1.queue_ref.op.run()
      with self.assertRaisesOpError("component types"):
        q_g_2.queue_ref.op.run()

      # Mismatched random seeds.
      q_h_1 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
      q_h_2 = data_flow_ops.RandomShuffleQueue(
          10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
      q_h_1.queue_ref.op.run()
      with self.assertRaisesOpError("random seeds"):
        q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
  def _blockingDequeue(self, sess, dequeue_op):
    """Runs `dequeue_op`, expecting it to be cancelled by a session close.

    `sess` is unused here but kept so all `_blocking*` helpers share the
    same (sess, op) call signature.
    """
    with self.assertRaisesOpError("was cancelled"):
      self.evaluate(dequeue_op)
  def _blockingDequeueMany(self, sess, dequeue_many_op):
    """Runs `dequeue_many_op`, expecting cancellation by a session close."""
    with self.assertRaisesOpError("was cancelled"):
      self.evaluate(dequeue_many_op)
  def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
    """Runs `dequeue_up_to_op`, expecting cancellation by a session close."""
    with self.assertRaisesOpError("was cancelled"):
      self.evaluate(dequeue_up_to_op)
  def _blockingEnqueue(self, sess, enqueue_op):
    """Runs `enqueue_op`, expecting cancellation by a session close."""
    with self.assertRaisesOpError("was cancelled"):
      self.evaluate(enqueue_op)
  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    """Runs `enqueue_many_op`, expecting cancellation by a session close."""
    with self.assertRaisesOpError("was cancelled"):
      self.evaluate(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session cancels all blocked queue operations.

    Five threads block on dequeue/dequeue_many/dequeue_up_to of an empty
    queue and enqueue/enqueue_many of a full one; sess.close() must cancel
    every one of them (checked by the `_blocking*` helpers).
    """
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    ops.get_default_graph().switch_to_thread_local()
    with self.cached_session() as sess:
      q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
          (),))
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)
      dequeue_up_to_op = q_empty.dequeue_up_to(1)

      q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))

      threads = [
          self.checkedThread(
              self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(
              self._blockingDequeueMany, args=(sess, dequeue_many_op)),
          self.checkedThread(
              self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
          self.checkedThread(
              self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(
              self._blockingEnqueueMany, args=(sess, enqueue_many_op))
      ]
      for t in threads:
        t.start()
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
  def testBigEnqueueMany(self):
    """An enqueue_many larger than capacity trickles in as space frees up.

    Ten elements are pushed into a capacity-5 queue: the enqueue blocks at
    the capacity limit (size stays 5) and only finishes after enough
    dequeues have happened; all ten elements eventually come out.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()

      enq_done = []

      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        self.evaluate(enq)
        enq_done.append(True)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()

      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(self.evaluate(size_op), 5)

      for _ in range(3):
        results.append(deq.eval())

      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(self.evaluate(size_op), 5)

      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()

      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)

      self.assertItemsEqual(elem, results)
  def testBigDequeueMany(self):
    """A dequeue_many larger than capacity completes once enough is enqueued.

    dequeue_many(4) on a capacity-2 queue blocks (results stay empty) until
    all four single-element enqueues have run.
    """
    with self.cached_session() as sess:
      q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
      elem = np.arange(4, dtype=np.int32)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)

      results = []

      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(self.evaluate(deq))

      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        self.assertEqual(len(results), 0)
        self.evaluate(enq)

      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertItemsEqual(elem, results)
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
  test.main()
| |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
from . import models
from .rest import RESTClientObject
from .rest import ApiException
import os
import re
import sys
import urllib
import json
import mimetypes
import random
import tempfile
import threading
from datetime import datetime
from datetime import date
# python 2 and python 3 compatibility library
from six import iteritems
try:
# for python3
from urllib.parse import quote
except ImportError:
# for python2
from urllib import quote
from .configuration import Configuration
class ApiClient(object):
"""
Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param host: The base path for the server to call.
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to the API.
"""
def __init__(self, host=None, header_name=None, header_value=None, cookie=None):
"""
Constructor of the class.
"""
self.rest_client = RESTClientObject()
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
if host is None:
self.host = Configuration().host
else:
self.host = host
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Python-Swagger/0.2'
    @property
    def user_agent(self):
        """
        Gets user agent.

        Read from the `User-Agent` entry of `default_headers`.
        """
        return self.default_headers['User-Agent']
    @user_agent.setter
    def user_agent(self, value):
        """
        Sets user agent.

        Stored in `default_headers` so it is sent with every request.
        """
        self.default_headers['User-Agent'] = value
    def set_default_header(self, header_name, header_value):
        """Registers a header to be sent with every request made by this client."""
        self.default_headers[header_name] = header_value
    def __call_api(self, resource_path, method,
                   path_params=None, query_params=None, header_params=None,
                   body=None, post_params=None, files=None,
                   response_type=None, auth_settings=None, callback=None):
        """
        Performs one HTTP call: serializes all parameter groups, sends the
        request, stores the raw response on `self.last_response`, and either
        returns the deserialized data or hands it to `callback`.
        """
        # headers parameters: caller headers plus client defaults and cookie.
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)

        # path parameters: substitute `{name}` placeholders with the
        # URL-quoted values.
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            for k, v in iteritems(path_params):
                replacement = quote(str(self.to_path_value(v)))
                resource_path = resource_path.\
                    replace('{' + k + '}', replacement)

        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            query_params = {k: self.to_path_value(v)
                            for k, v in iteritems(query_params)}

        # post parameters (form fields and file uploads)
        if post_params or files:
            post_params = self.prepare_post_parameters(post_params, files)
            post_params = self.sanitize_for_serialization(post_params)

        # auth setting: may add header or query entries.
        self.update_params_for_auth(header_params, query_params, auth_settings)

        # body
        if body:
            body = self.sanitize_for_serialization(body)

        # request url
        url = self.host + resource_path

        # perform request and return response
        response_data = self.request(method, url,
                                     query_params=query_params,
                                     headers=header_params,
                                     post_params=post_params, body=body)

        # Keep the raw response available for callers that need status/headers.
        self.last_response = response_data

        # deserialize response data
        if response_type:
            deserialized_data = self.deserialize(response_data, response_type)
        else:
            deserialized_data = None

        if callback:
            callback(deserialized_data)
        else:
            return deserialized_data
def to_path_value(self, obj):
"""
Takes value and turn it into a string suitable for inclusion in
the path, by url-encoding.
:param obj: object or string value.
:return string: quoted value.
"""
if type(obj) == list:
return ','.join(obj)
else:
return str(obj)
    def sanitize_for_serialization(self, obj):
        """
        Builds a JSON POST object.

        If obj is None, return None.
        If obj is str, int, float, bool, tuple, return directly
        (note: tuples are passed through without sanitizing their items).
        If obj is datetime.datetime, datetime.date
        convert to string in iso8601 format.
        If obj is list, sanitize each element in the list.
        If obj is dict, sanitize each value of the dict.
        If obj is swagger model, return the sanitized properties dict.

        :param obj: The data to serialize.
        :return: The serialized form of data.
        """
        types = (str, int, float, bool, tuple)
        if sys.version_info < (3,0):
            # On Python 2, `unicode` strings are also primitives.
            types = types + (unicode,)
        if isinstance(obj, type(None)):
            return None
        elif isinstance(obj, types):
            return obj
        elif isinstance(obj, list):
            return [self.sanitize_for_serialization(sub_obj)
                    for sub_obj in obj]
        elif isinstance(obj, (datetime, date)):
            return obj.isoformat()
        else:
            if isinstance(obj, dict):
                obj_dict = obj
            else:
                # Convert model obj to dict except
                # attributes `swagger_types`, `attribute_map`
                # and attributes which value is not None.
                # Convert attribute name to json key in
                # model definition for request.
                obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
                            for attr, _ in iteritems(obj.swagger_types)
                            if getattr(obj, attr) is not None}
            return {key: self.sanitize_for_serialization(val)
                    for key, val in iteritems(obj_dict)}
    def deserialize(self, response, response_type):
        """
        Deserializes response into an object.

        :param response: RESTResponse object to be deserialized.
        :param response_type: class literal for
            deserialized object, or string of class name.

        :return: deserialized object.
        """
        # handle file downloading
        # save response body into a tmp file and return the instance
        if "file" == response_type:
            return self.__deserialize_file(response)

        # fetch data from response object; fall back to the raw body when it
        # is not valid JSON.
        try:
            data = json.loads(response.data)
        except ValueError:
            data = response.data

        return self.__deserialize(data, response_type)
    def deserialize_json(self, json_data, klass):
        """
        Deserializes already-parsed JSON into an object.

        :param json_data: dict, list or str.
        :param klass: class literal, or string of class name.

        :return: object.
        """
        return self.__deserialize(json_data, klass)
    def __deserialize(self, data, klass):
        """
        Deserializes dict, list, str into an object.

        :param data: dict, list or str.
        :param klass: class literal, or string of class name. Container
            specs are supported as strings: `list[X]` and `dict(K, V)`.

        :return: object.
        """
        if data is None:
            return None

        if type(klass) == str:
            # `list[X]` -> recursively deserialize each element as X.
            if klass.startswith('list['):
                sub_kls = re.match('list\[(.*)\]', klass).group(1)
                return [self.__deserialize(sub_data, sub_kls)
                        for sub_data in data]

            # `dict(K, V)` -> recursively deserialize each value as V.
            if klass.startswith('dict('):
                sub_kls = re.match('dict\(([^,]*), (.*)\)', klass).group(2)
                return {k: self.__deserialize(v, sub_kls)
                        for k, v in iteritems(data)}

            # convert str to class
            # for native types
            if klass in ['int', 'float', 'str', 'bool', 'bytearray',
                         "date", 'datetime', "object"]:
                # NOTE(review): eval() on the class-name string — assumes
                # `klass` comes from generated code, never from user input.
                klass = eval(klass)
            # for model types
            else:
                klass = eval('models.' + klass)

        if klass in [int, float, str, bool, bytearray]:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == date:
            return self.__deserialize_date(data)
        elif klass == datetime:
            return self.__deserialize_datatime(data)
        else:
            return self.__deserialize_model(data, klass)
    def call_api(self, resource_path, method,
                 path_params=None, query_params=None, header_params=None,
                 body=None, post_params=None, files=None,
                 response_type=None, auth_settings=None, callback=None):
        """
        Makes the HTTP request (synchronous) and return the deserialized data.
        To make an async request, define a function for callback.

        :param resource_path: Path to method endpoint.
        :param method: Method to call.
        :param path_params: Path parameters in the url.
        :param query_params: Query parameters in the url.
        :param header_params: Header parameters to be
            placed in the request header.
        :param body: Request body.
        :param post_params dict: Request post form parameters,
            for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
        :param files dict: key -> filename, value -> filepath,
            for `multipart/form-data`.
        :param callback function: Callback function for asynchronous request.
            If provide this parameter,
            the request will be called asynchronously.
        :return:
            If provide parameter callback,
            the request will be called asynchronously.
            The method will return the request thread.
            If parameter callback is None,
            then the method will return the response directly.
        """
        if callback is None:
            # Synchronous: run the call in the current thread.
            return self.__call_api(resource_path, method,
                                   path_params, query_params, header_params,
                                   body, post_params, files,
                                   response_type, auth_settings, callback)
        else:
            # Asynchronous: run the call in a daemon-less worker thread and
            # deliver the result through `callback`.
            thread = threading.Thread(target=self.__call_api,
                                      args=(resource_path, method,
                                            path_params, query_params,
                                            header_params, body,
                                            post_params, files,
                                            response_type, auth_settings,
                                            callback))
            thread.start()
            return thread
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None):
"""
Makes the HTTP request using RESTClient.
"""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers)
else:
raise ValueError(
"http method must be `GET`, `HEAD`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def prepare_post_parameters(self, post_params=None, files=None):
"""
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = {}
if post_params:
params.update(post_params)
if files:
for k, v in iteritems(files):
if not v:
continue
with open(v, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = mimetypes.\
guess_type(filename)[0] or 'application/octet-stream'
params[k] = tuple([filename, filedata, mimetype])
return params
def select_header_accept(self, accepts):
"""
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = list(map(lambda x: x.lower(), accepts))
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = list(map(lambda x: x.lower(), content_types))
if 'application/json' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters dict to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
config = Configuration()
if not auth_settings:
return
for auth in auth_settings:
auth_setting = config.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys[auth_setting['key']] = auth_setting['value']
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
    def __deserialize_file(self, response):
        """
        Saves response body into a file in a temporary folder,
        using the filename from the `Content-Disposition` header if provided.

        :param response: RESTResponse.
        :return: file path.
        """
        config = Configuration()
        # Create (then immediately discard) a temp file so we obtain a
        # unique path inside the configured temp folder.
        fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
        os.close(fd)
        os.remove(path)
        content_disposition = response.getheader("Content-Disposition")
        if content_disposition:
            # NOTE(review): re.search() returns None when the header has no
            # filename= component, which would raise AttributeError on
            # .group(1) — presumably servers always include it; confirm.
            filename = re.\
                search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
                group(1)
            path = os.path.join(os.path.dirname(path), filename)
        # Text mode: assumes response.data is str, not bytes — TODO confirm
        # for binary downloads.
        with open(path, "w") as f:
            f.write(response.data)
        return path
    def __deserialize_primitive(self, data, klass):
        """
        Deserializes string to primitive type.

        :param data: str.
        :param klass: class literal.
        :return: int, float, str, bool.
        """
        try:
            value = klass(data)
        except UnicodeEncodeError:
            # Python 2 only: fall back to a unicode string when encoding
            # fails (`unicode` does not exist on Python 3, where this
            # exception path is not reached by str()).
            value = unicode(data)
        except TypeError:
            # klass could not be called with this data; return it as-is.
            value = data
        return value
    def __deserialize_object(self, value):
        """
        Return the original value unchanged (identity deserialization).

        :return: object.
        """
        return value
def __deserialize_date(self, string):
"""
Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a date object"
.format(string)
)
def __deserialize_datatime(self, string):
"""
Deserializes string to datetime.
The string should be in iso8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a datetime object".
format(string)
)
    def __deserialize_model(self, data, klass):
        """
        Deserializes list or dict to model.

        Copies only the attributes whose mapped key is present in `data`,
        recursively deserializing each value to its declared swagger type.

        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """
        instance = klass()
        for attr, attr_type in iteritems(instance.swagger_types):
            # NOTE(review): the `in data` membership test runs before the
            # isinstance() check, so a `data` of another container-like type
            # would raise on membership — presumably data is always a dict
            # or list here; confirm at call sites.
            if data is not None \
                    and instance.attribute_map[attr] in data\
                    and isinstance(data, (list, dict)):
                value = data[instance.attribute_map[attr]]
                setattr(instance, attr, self.__deserialize(value, attr_type))
        return instance
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six.moves import zip_longest
from io import StringIO
import io
import os
import unittest
import warnings
import types
from tempfile import mkstemp
from skbio.io import (FormatIdentificationWarning, UnrecognizedFormatError,
ArgumentOverrideWarning, io_registry, sniff,
create_format)
from skbio.io.registry import (IORegistry, FileSentinel, Format,
DuplicateRegistrationError,
InvalidRegistrationError)
from skbio.util import TestingUtilError, get_data_path
from skbio import DNA, read, write
class TestClass(object):
    """Simple list container used as a read/write target in registry tests."""

    def __init__(self, l):
        self.list = l

    def __eq__(self, other):
        # Equality demands the exact same class — subclass instances are
        # deliberately rejected so readers can't return knockoff types.
        if self.__class__ is not other.__class__:
            return False
        return self.list == other.list

    def __repr__(self):
        return "%s(%s)" % (str(self.__class__.__name__), str(self.list))
class TestClassA(TestClass):
    """Marker subclass; compares equal only to other TestClassA instances."""
    pass
class TestClassB(TestClass):
    """Marker subclass; compares equal only to other TestClassB instances."""
    pass
class TestFormatAndIORegistry(unittest.TestCase):
    """Tests for adding Format objects to an IORegistry."""

    def test_add_duplicate_format(self):
        registry = IORegistry()
        registry.add_format(Format('Example'))
        # Registering a second format with the same name must fail.
        with self.assertRaises(DuplicateRegistrationError):
            registry.add_format(Format('Example'))
class RegistryTest(unittest.TestCase):
    """Base fixture: a fresh IORegistry plus two scratch temp files.

    Subclasses use ``self.fp1``/``self.fp2`` as temporary file paths and
    ``self.registry`` as an isolated registry instance.
    """

    def setUp(self):
        self.registry = IORegistry()
        self.fd1, self.fp1 = mkstemp()
        self.fd2, self.fp2 = mkstemp()

    def tearDown(self):
        # Bug fix: the original removed each file before closing its
        # descriptor. Unlinking an open file fails on Windows, and the fd
        # would leak if os.remove raised. Close first, then unlink.
        os.close(self.fd1)
        os.remove(self.fp1)
        os.close(self.fd2)
        os.remove(self.fp2)
class TestRegisterAndGetReader(RegistryTest):
    """Tests for registering readers on formats and looking them up again."""

    def test_get_reader_no_match(self):
        self.assertIs(None, self.registry.get_reader('not_a_format',
                                                     TestClass))

    def test_get_reader_when_only_writer_exists(self):
        format1 = self.registry.create_format('format1')

        @format1.writer(TestClass)
        def format_writer(fh):
            return

        self.assertEqual(None, self.registry.get_reader('format', TestClass))

    def test_register_reader_on_many(self):
        format1 = self.registry.create_format('format1')
        format2 = self.registry.create_format('format2')
        format3 = self.registry.create_format('format3')
        format4 = self.registry.create_format('format4', encoding='binary')
        format5 = self.registry.create_format('format5', encoding='binary')

        @format1.reader(TestClassA)
        def format1_reader(fh):
            return

        @format1.reader(TestClassB)
        def format1_reader_b(fh):
            return

        @format2.reader(TestClassA)
        def format2_reader(fh):
            return

        @format3.reader(TestClassB)
        def format3_reader(fh):
            return

        @format4.reader(TestClassA)
        def format4_reader(fh):
            return

        @format4.reader(TestClassB)
        def format4_reader_b(fh):
            return

        # `None` registers the generator-based reader for the format.
        @format5.reader(None)
        def format5_reader(fh):
            return

        # Each (format, class) pair resolves to exactly the function
        # registered for it; unregistered pairs resolve to None.
        self.assertIs(format1_reader,
                      self.registry.get_reader('format1', TestClassA))
        self.assertIs(format1_reader_b,
                      self.registry.get_reader('format1', TestClassB))
        self.assertIs(format2_reader,
                      self.registry.get_reader('format2', TestClassA))
        self.assertIs(None, self.registry.get_reader('format2', TestClassB))
        self.assertIs(None, self.registry.get_reader('format3', TestClassA))
        self.assertIs(format3_reader,
                      self.registry.get_reader('format3', TestClassB))
        self.assertIs(format4_reader,
                      self.registry.get_reader('format4', TestClassA))
        self.assertIs(format4_reader_b,
                      self.registry.get_reader('format4', TestClassB))
        self.assertIs(format5_reader,
                      self.registry.get_reader('format5', None))
        self.assertIs(None, self.registry.get_reader('format5', TestClassA))
        self.assertIs(None, self.registry.get_reader('format5', TestClassB))

    def test_register_reader_over_existing(self):
        format1 = self.registry.create_format('format1')
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @format1.reader(TestClassA)
            def format1_reader(fh):
                return

            # Second registration for the same (format, class) pair raises.
            @format1.reader(TestClassA)
            def duplicate_format1_reader(fh):
                return

        self.assertTrue('format1' in str(cm.exception))
        self.assertTrue('reader' in str(cm.exception))
        self.assertTrue(TestClassA.__name__ in str(cm.exception))

    def test_register_reader_over_existing_override(self):
        format1 = self.registry.create_format('format1')

        @format1.reader(TestClassA)
        def format1_reader(fh):
            return

        self.assertIs(format1_reader,
                      self.registry.get_reader('format1', TestClassA))

        # `override=True` replaces the existing reader instead of raising.
        @format1.reader(TestClassA, override=True)
        def duplicate_format1_reader(fh):
            return

        self.assertIs(duplicate_format1_reader,
                      self.registry.get_reader('format1', TestClassA))

    def test_mistype_reader_registration(self):
        format1 = self.registry.create_format('format1')

        # Forgetting the parentheses passes the function itself as the
        # class argument, which the registry must reject.
        with self.assertRaises(InvalidRegistrationError):
            @format1.reader
            def left_out_parens(fh):
                return
class TestRegisterAndGetWriter(RegistryTest):
    """Tests for registering writers on formats and looking them up again."""

    def test_get_writer_no_match(self):
        self.assertEqual(None, self.registry.get_writer('not_a_format',
                                                        TestClass))

    def test_get_writer_when_only_reader_exists(self):
        format = self.registry.create_format('format')

        @format.reader(TestClass)
        def format_reader(fh):
            return

        self.assertEqual(None, self.registry.get_writer('format', TestClass))

    def test_register_writer_on_many(self):
        format1 = self.registry.create_format('format1')
        format2 = self.registry.create_format('format2')
        format3 = self.registry.create_format('format3')
        format4 = self.registry.create_format('format4', encoding='binary')
        format5 = self.registry.create_format('format5', encoding='binary')

        @format1.writer(TestClassA)
        def format1_writer(obj, fh):
            return

        @format1.writer(TestClassB)
        def format1_writer_b(obj, fh):
            return

        @format2.writer(TestClassA)
        def format2_writer(obj, fh):
            return

        @format3.writer(TestClassB)
        def format3_writer(obj, fh):
            return

        # NOTE(review): these last three take only `fh`, unlike the
        # `(obj, fh)` signature above; they are never invoked here, so the
        # mismatch is harmless for registration lookups.
        @format4.writer(TestClassA)
        def format4_writer(fh):
            return

        @format4.writer(TestClassB)
        def format4_writer_b(fh):
            return

        @format5.writer(None)
        def format5_writer(fh):
            return

        # Each (format, class) pair resolves to exactly the function
        # registered for it; unregistered pairs resolve to None.
        self.assertEqual(format1_writer,
                         self.registry.get_writer('format1', TestClassA))
        self.assertEqual(format1_writer_b,
                         self.registry.get_writer('format1', TestClassB))
        self.assertEqual(format2_writer,
                         self.registry.get_writer('format2', TestClassA))
        self.assertEqual(None,
                         self.registry.get_writer('format2', TestClassB))
        self.assertEqual(None,
                         self.registry.get_writer('format3', TestClassA))
        self.assertEqual(format3_writer,
                         self.registry.get_writer('format3', TestClassB))
        self.assertIs(format4_writer,
                      self.registry.get_writer('format4', TestClassA))
        self.assertIs(format4_writer_b,
                      self.registry.get_writer('format4', TestClassB))
        self.assertIs(format5_writer,
                      self.registry.get_writer('format5', None))
        self.assertIs(None, self.registry.get_writer('format5', TestClassA))
        self.assertIs(None, self.registry.get_writer('format5', TestClassB))

    def test_register_writer_over_existing(self):
        format1 = self.registry.create_format('format1')
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @format1.writer(TestClassA)
            def format1_writer(obj, fh):
                return

            # Second registration for the same (format, class) pair raises.
            @format1.writer(TestClassA)
            def duplicate_format1_writer(obj, fh):
                return

        self.assertTrue('format1' in str(cm.exception))
        self.assertTrue('writer' in str(cm.exception))
        self.assertTrue(TestClassA.__name__ in str(cm.exception))

    def test_register_writer_over_existing_override(self):
        format1 = self.registry.create_format('format1')

        @format1.writer(TestClassA)
        def format1_writer(obj, fh):
            return

        self.assertIs(format1_writer,
                      self.registry.get_writer('format1', TestClassA))

        # `override=True` replaces the existing writer instead of raising.
        @format1.writer(TestClassA, override=True)
        def duplicate_format1_writer(obj, fh):
            return

        self.assertIs(duplicate_format1_writer,
                      self.registry.get_writer('format1', TestClassA))

    def test_mistype_writer_registration(self):
        format1 = self.registry.create_format('format1')

        # Forgetting the parentheses passes the function itself as the
        # class argument, which the registry must reject.
        with self.assertRaises(InvalidRegistrationError):
            @format1.writer
            def left_out_parens(fh):
                return
class TestRegisterAndGetSniffer(RegistryTest):
    """Tests for registering sniffers on formats and looking them up."""

    def test_get_sniffer_no_match(self):
        self.assertEqual(None, self.registry.get_sniffer('not_a_format'))

    def test_register_sniffer_on_many(self):
        format1 = self.registry.create_format('format1')
        format2 = self.registry.create_format('format2')
        format3 = self.registry.create_format('format3', encoding='binary')

        @format1.sniffer()
        def format1_sniffer(fh):
            return '1' in fh.readline(), {}

        @format2.sniffer()
        def format2_sniffer(fh):
            return '2' in fh.readline(), {}

        @format3.sniffer()
        def format3_sniffer(fh):
            return '3' in fh.readline(), {}

        self.assertEqual(format1_sniffer,
                         self.registry.get_sniffer('format1'))

        self.assertEqual(format2_sniffer,
                         self.registry.get_sniffer('format2'))

        self.assertEqual(format3_sniffer,
                         self.registry.get_sniffer('format3'))

    def test_register_sniffer_over_existing(self):
        format1 = self.registry.create_format('format1')
        with self.assertRaises(DuplicateRegistrationError) as cm:
            @format1.sniffer()
            def format1_sniffer(fh):
                return False, {}

            # A format can have only one sniffer; re-registering raises.
            @format1.sniffer()
            def duplicate_format1_sniffer(fh):
                return False, {}

        self.assertTrue('format1' in str(cm.exception))

    def test_register_sniffer_over_existing_override(self):
        format1 = self.registry.create_format('format1')

        @format1.sniffer()
        def format1_sniffer(fh):
            return False, {}

        self.assertIs(self.registry.get_sniffer('format1'), format1_sniffer)

        # `override=True` replaces the existing sniffer instead of raising.
        @format1.sniffer(override=True)
        def duplicate_format1_sniffer(fh):
            return False, {}

        self.assertIs(self.registry.get_sniffer('format1'),
                      duplicate_format1_sniffer)

    def test_sniffer_warns_on_exception(self):
        format = self.registry.create_format('format')

        @format.sniffer()
        def format_sniffer(fh):
            raise TestingUtilError("Sniffer will return False and warn.")

        fh = StringIO()
        sniffer = self.registry.get_sniffer('format')

        # With warnings escalated to errors, the sniffer's internal
        # exception surfaces as a FormatIdentificationWarning.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("error")
            with self.assertRaises(FormatIdentificationWarning):
                sniffer(fh)

        # With warnings suppressed, the sniffer degrades to (False, {}).
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore")
            result, kwargs = sniffer(fh)
            self.assertFalse(result)
            self.assertEqual({}, kwargs)

        fh.close()

    def test_mistype_sniffer_registration(self):
        format1 = self.registry.create_format('format1')

        # Forgetting the parentheses passes the function itself instead of
        # calling the decorator factory; the registry must reject this.
        with self.assertRaises(InvalidRegistrationError):
            @format1.sniffer
            def left_out_parens(fh):
                return
class TestListReadFormats(RegistryTest):
    """Tests that list_read_formats reports only formats readable into a class."""

    def test_no_read_formats(self):
        format1 = self.registry.create_format('format1')

        @format1.reader(TestClassA)
        def this_isnt_on_clsB(fh):
            return

        self.assertEqual([], self.registry.list_read_formats(TestClassB))

    def test_one_read_format(self):
        format1 = self.registry.create_format('format1')

        @format1.reader(TestClass)
        def format1_cls(fh):
            return

        self.assertEqual(['format1'],
                         self.registry.list_read_formats(TestClass))

    def test_many_read_formats(self):
        format1 = self.registry.create_format('format1')
        format2 = self.registry.create_format('format2')
        format3 = self.registry.create_format('format3', encoding='binary')
        format4 = self.registry.create_format('format4')
        format5 = self.registry.create_format('format5', encoding='binary')

        @format1.reader(TestClassA)
        def format1_clsA(fh):
            return

        @format2.reader(TestClassA)
        def format2_clsA(fh):
            return

        @format3.reader(TestClassA)
        def format3_clsA(fh):
            return

        @format3.reader(TestClassB)
        def format3_clsB(fh):
            return

        # format4 reads only TestClassB; format5 only *writes* TestClassA.
        # Neither should appear in TestClassA's read formats.
        @format4.reader(TestClassB)
        def format4_clsB(fh):
            return

        @format5.writer(TestClassA)
        def format5_clsA(fh):
            return

        formats = self.registry.list_read_formats(TestClassA)
        self.assertTrue('format1' in formats)
        self.assertTrue('format2' in formats)
        self.assertTrue('format3' in formats)
        self.assertTrue('format4' not in formats)
        self.assertTrue('format5' not in formats)
class TestListWriteFormats(RegistryTest):
    """Tests that list_write_formats reports only formats writable from a class."""

    def test_no_write_formats(self):
        format1 = self.registry.create_format('format1')

        @format1.writer(TestClassA)
        def this_isnt_on_clsB(fh):
            return

        self.assertEqual([], self.registry.list_write_formats(TestClassB))

    def test_one_write_format(self):
        format1 = self.registry.create_format('format1')

        @format1.writer(TestClass)
        def format1_cls(fh):
            return

        self.assertEqual(['format1'],
                         self.registry.list_write_formats(TestClass))

    def test_many_write_formats(self):
        format1 = self.registry.create_format('format1')
        format2 = self.registry.create_format('format2')
        format3 = self.registry.create_format('format3', encoding='binary')
        format4 = self.registry.create_format('format4')
        format5 = self.registry.create_format('format5', encoding='binary')

        @format1.writer(TestClassA)
        def format1_clsA(fh):
            return

        @format2.writer(TestClassA)
        def format2_clsA(fh):
            return

        @format3.writer(TestClassA)
        def format3_clsA(fh):
            return

        @format3.writer(TestClassB)
        def format3_clsB(fh):
            return

        # format4 writes only TestClassB; format5 only *reads* TestClassA.
        # Neither should appear in TestClassA's write formats.
        @format4.writer(TestClassB)
        def format4_clsB(fh):
            return

        @format5.reader(TestClassA)
        def format5_clsA(fh):
            return

        formats = self.registry.list_write_formats(TestClassA)
        self.assertTrue('format1' in formats)
        self.assertTrue('format2' in formats)
        self.assertTrue('format3' in formats)
        self.assertTrue('format4' not in formats)
        self.assertTrue('format5' not in formats)
class TestSniff(RegistryTest):
    """Tests for IORegistry.sniff: format detection, encodings, newlines,
    binary vs. text dispatch, and file-position preservation.

    setUp registers sniffers for format1-format4 (each matching a digit in
    the first line) and readers for format3/format4; format5 has no sniffer.
    """

    def setUp(self):
        super(TestSniff, self).setUp()
        format1 = self.registry.create_format('format1')
        format2 = self.registry.create_format('format2')
        format3 = self.registry.create_format('format3')
        format4 = self.registry.create_format('format4')
        # No sniffer for this format:
        self.registry.create_format('format5')

        @format1.sniffer()
        def format1_sniffer(fh):
            return '1' in fh.readline(), {}

        @format2.sniffer()
        def format2_sniffer(fh):
            return '2' in fh.readline(), {}

        @format3.sniffer()
        def format3_sniffer(fh):
            return '3' in fh.readline(), {}

        @format4.sniffer()
        def format4_sniffer(fh):
            return '4' in fh.readline(), {}

        @format3.reader(TestClass)
        def reader3(fh):
            return

        @format4.reader(TestClass)
        def reader4(fh):
            return

    def test_no_matches(self):
        fh = StringIO(u"no matches here")
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.registry.sniff(fh)
        self.assertTrue(str(fh) in str(cm.exception))
        fh.close()

    def test_one_match(self):
        fh = StringIO(u"contains a 3")
        self.assertEqual('format3', self.registry.sniff(fh)[0])

    def test_many_matches(self):
        # All four sniffers match, which is ambiguous and must raise with
        # every candidate named in the message.
        fh = StringIO(u"1234 will match all")
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.registry.sniff(fh)
        self.assertTrue("format1" in str(cm.exception))
        self.assertTrue("format2" in str(cm.exception))
        self.assertTrue("format3" in str(cm.exception))
        self.assertTrue("format4" in str(cm.exception))
        fh.close()

    def test_that_encoding_is_used(self):
        formatx = self.registry.create_format('formatx')

        fp = get_data_path('big5_file')

        @formatx.sniffer()
        def sniffer(fh):
            # The handle passed to the sniffer carries the requested encoding.
            self.assertEqual('big5', fh.encoding)
            return True, {}

        fmt, _ = self.registry.sniff(fp, encoding='big5')
        self.assertEqual(fmt, 'formatx')

    def test_that_newline_is_used(self):
        formatx = self.registry.create_format('formatx')

        fp = get_data_path('real_file')

        @formatx.sniffer()
        def sniffer(fh):
            # With newline='\r' no translation splits the lines.
            self.assertEqual(fh.readlines(), ['a\nb\nc\nd\ne\n'])
            return True, {}

        fmt, _ = self.registry.sniff(fp, newline='\r')
        self.assertEqual(fmt, 'formatx')

    def test_non_default_encoding(self):
        big5_format = self.registry.create_format('big5_format',
                                                  encoding='big5')

        @big5_format.sniffer()
        def sniffer(fh):
            self.assertEqual(self._expected_encoding, fh.encoding)
            return True, {}

        # The format's own default encoding applies...
        self._expected_encoding = 'big5'
        fmt, _ = self.registry.sniff(self.fp1)
        self.assertEqual(fmt, 'big5_format')

        # ...unless an explicit encoding overrides it.
        self._expected_encoding = 'UTF-8'
        fmt, _ = self.registry.sniff(self.fp1, encoding='UTF-8')
        self.assertEqual(fmt, 'big5_format')

    def test_non_default_newline(self):
        formatx = self.registry.create_format('formatx', newline='\r')

        fp = get_data_path('real_file')

        @formatx.sniffer()
        def sniffer(fh):
            self.assertEqual(fh.readlines(), self._expected_lines)
            return True, {}

        # The format's default newline applies...
        self._expected_lines = ['a\nb\nc\nd\ne\n']
        fmt, _ = self.registry.sniff(fp)
        self.assertEqual(fmt, 'formatx')

        # ...unless an explicit newline overrides it.
        self._expected_lines = ['a\n', 'b\n', 'c\n', 'd\n', 'e\n']
        fmt, _ = self.registry.sniff(fp, newline=None)
        self.assertEqual(fmt, 'formatx')

    def test_position_not_mutated_real_file(self):
        formatx = self.registry.create_format('formatx')

        @formatx.sniffer()
        def sniffer(fh):
            return True, {}

        with io.open(get_data_path('real_file')) as fh:
            fh.seek(2)
            self.registry.sniff(fh)
            # Sniffing must restore the caller's file position.
            self.assertEqual(fh.tell(), 2)
            self.assertEqual('b\n', fh.readline())

    def test_position_not_mutated_fileish(self):
        formatx = self.registry.create_format('formatx')

        @formatx.sniffer()
        def sniffer(fh):
            return True, {}

        fh = StringIO(u'a\nb\nc\nd\n')
        fh.seek(2)
        self.registry.sniff(fh)
        # Sniffing must restore the caller's position in file-like objects.
        self.assertEqual('b\n', fh.readline())

    def test_sniff_with_errors_in_sniffer(self):
        formatx = self.registry.create_format('formatx', encoding='ascii')

        @formatx.sniffer()
        def sniffer(fh):
            raise Exception("OH NO!")

        fp = get_data_path('big5_file')
        # A crashing sniffer is reported as a FormatIdentificationWarning.
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('error')
            with self.assertRaises(FormatIdentificationWarning):
                fmt, _ = self.registry.sniff(fp)

    def test_sniff_with_encoding_errors(self):
        formatx = self.registry.create_format('formatx', encoding='ascii')

        @formatx.sniffer()
        def sniffer(fh):
            fh.read()
            return True, {}

        fp = get_data_path('big5_file')
        with self.assertRaises(UnrecognizedFormatError):
            fmt, _ = self.registry.sniff(fp, errors='strict')
        # errors is set to ignore by default, so our sniffer will return
        # true even though read() didn't entirely work for ascii
        fmt, _ = self.registry.sniff(fp)
        self.assertEqual(fmt, 'formatx')

    def test_binary_sniffer(self):
        binf = self.registry.create_format('binf', encoding='binary')

        @binf.sniffer()
        def sniffer(fh):
            # Binary-encoded formats receive a buffered binary handle.
            self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
            return True, {}

        fmt, _ = self.registry.sniff(self.fp1)
        self.assertEqual(fmt, 'binf')

    def test_text_sniffer(self):
        textf = self.registry.create_format('textf', encoding=None)

        @textf.sniffer()
        def sniffer(fh):
            # Text formats receive a text-mode handle.
            self.assertIsInstance(fh, io.TextIOBase)
            return True, {}

        fmt, _ = self.registry.sniff(self.fp1)
        self.assertEqual(fmt, 'textf')

    def test_sniff_with_illegal_encoding(self):
        binf = self.registry.create_format('binf', encoding='binary')
        textf = self.registry.create_format('textf', encoding=None)

        @binf.sniffer()
        def binf_sniffer(fh):
            return True, {}

        @textf.sniffer()
        def textf_sniffer(fh):
            return True, {}

        # Should skip binary sniffers
        fmt, _ = self.registry.sniff(self.fp1, encoding=None)
        self.assertEqual(fmt, 'textf')

        # Should skip text sniffers
        fmt, _ = self.registry.sniff(self.fp1, encoding='binary')
        self.assertEqual(fmt, 'binf')

        # Mismatched encoding/source combinations are rejected outright.
        with self.assertRaises(ValueError):
            self.registry.sniff([u'some content\n'], encoding='binary')

        with self.assertRaises(ValueError):
            binf_sniffer(self.fp1, encoding=None)

        with self.assertRaises(ValueError):
            textf_sniffer(self.fp1, encoding='binary')

    def test_binary_fall_through(self):
        binf = self.registry.create_format('binf', encoding='binary')
        textf = self.registry.create_format('textf', encoding=None)

        @binf.sniffer()
        def binf_sniffer(fh):
            self._check_binf = True
            return False, {}

        @textf.sniffer()
        def textf_sniffer(fh):
            self._check_textf = True
            return True, {}

        self._check_binf = False
        self._check_textf = False

        # When the binary sniffer declines, sniffing falls through to text.
        fmt, _ = self.registry.sniff(self.fp1)
        self.assertEqual(fmt, 'textf')

        self.assertTrue(self._check_binf)
        self.assertTrue(self._check_textf)

    def test_sniff_gzip(self):
        expected = u"This is some content\nIt occurs on more than one line\n"

        formata = self.registry.create_format('formata', encoding='binary')
        formatb = self.registry.create_format('formatb')
        formatc = self.registry.create_format('formatc')

        # Gzip sources are transparently decompressed: binary sniffers see
        # the decompressed bytes, text sniffers the decoded text.
        @formata.sniffer()
        def formata_sniffer(fh):
            self._check_f1 = True
            self.assertEqual(fh.read(), expected.encode('ascii'))
            return False, {}

        @formatb.sniffer()
        def formatb_sniffer(fh):
            self._check_f2 = True
            self.assertEqual(fh.read(), expected)
            return True, {}

        @formatc.sniffer()
        def formatc_sniffer(fh):
            self._check_f3 = True
            self.assertEqual(fh.read(), expected)
            return False, {}

        self._check_f1 = False
        self._check_f2 = False
        self._check_f3 = False
        self.registry.sniff(get_data_path('example_file.gz'))
        self.assertTrue(self._check_f1)
        self.assertTrue(self._check_f2)
        self.assertTrue(self._check_f3)

    def test_text_skip_binary(self):
        binf = self.registry.create_format('binf', encoding='binary')
        textf = self.registry.create_format('textf', encoding=None)

        @binf.sniffer()
        def binf_sniffer(fh):
            self._check_binf = True
            return True, {}

        @textf.sniffer()
        def textf_sniffer(fh):
            self._check_textf = True
            return True, {}

        # A text source (list of unicode lines) never reaches binary sniffers.
        self._check_binf = False
        self._check_textf = False

        fmt, _ = self.registry.sniff([u'text'])
        self.assertEqual(fmt, 'textf')

        self.assertFalse(self._check_binf)
        self.assertTrue(self._check_textf)

        # Nor does an explicit encoding=None request.
        self._check_binf = False
        self._check_textf = False

        fmt, _ = self.registry.sniff(self.fp1, encoding=None)
        self.assertEqual(fmt, 'textf')

        self.assertFalse(self._check_binf)
        self.assertTrue(self._check_textf)

    def test_text_skip_text(self):
        binf = self.registry.create_format('binf', encoding='binary')
        textf = self.registry.create_format('textf', encoding=None)

        @binf.sniffer()
        def binf_sniffer(fh):
            self._check_binf = True
            return True, {}

        @textf.sniffer()
        def textf_sniffer(fh):
            self._check_textf = True
            return True, {}

        # An explicit encoding='binary' request never reaches text sniffers.
        self._check_binf = False
        self._check_textf = False

        fmt, _ = self.registry.sniff(self.fp1, encoding='binary')
        self.assertEqual(fmt, 'binf')

        self.assertTrue(self._check_binf)
        self.assertFalse(self._check_textf)
class TestRead(RegistryTest):
def test_format_and_into_are_none(self):
fh = StringIO()
with self.assertRaises(ValueError):
self.registry.read(fh)
fh.close()
def test_format_is_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO(u'1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(TestClass)
def reader(fh):
self.assertIsInstance(fh, io.TextIOBase)
return TestClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fh, into=TestClass)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
fh.close()
def test_into_is_none(self):
format1 = self.registry.create_format('format1')
fh = StringIO(u'1\n2\n3\n4')
@format1.reader(None)
def reader(fh):
self.assertIsInstance(fh, io.TextIOBase)
for value in [int(x) for x in fh.read().split('\n')]:
yield value
generator = self.registry.read(fh, format='format1')
self.assertIsInstance(generator, types.GeneratorType)
first_run = True
for a, b in zip(generator, [1, 2, 3, 4]):
if first_run:
fh.seek(3)
first_run = False
self.assertEqual(a, b)
self.assertEqual(3, fh.tell())
fh.close()
def test_into_is_none_real_file(self):
format1 = self.registry.create_format('format1')
fp = self.fp1
with open(fp, 'w') as fh:
fh.write('1\n2\n3\n4')
self._test_fh = None
@format1.reader(None)
def reader(fh):
self._test_fh = fh
for value in [int(x) for x in fh.read().split('\n')]:
yield value
generator = self.registry.read(fp, format='format1')
for a, b in zip_longest(generator, [1, 2, 3, 4]):
self.assertEqual(a, b)
self.assertTrue(self._test_fh.closed)
def test_reader_does_not_exist(self):
fh = StringIO()
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.read(fh, format='not_a_format', into=TestClass)
self.assertTrue(TestClass.__name__ in str(cm.exception))
self.assertTrue('not_a_format' in str(cm.exception))
with self.assertRaises(UnrecognizedFormatError) as cm:
self.registry.read(fh, format='not_a_format2')
self.assertTrue('generator' in str(cm.exception))
self.assertTrue('not_a_format2' in str(cm.exception))
def test_reader_exists_with_verify_true(self):
format1 = self.registry.create_format('format1')
fh = StringIO(u'1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
self.was_verified = True
return '1' in fh.readline(), {}
@format1.reader(TestClass)
def reader(fh):
return TestClass([int(x) for x in fh.read().split('\n')])
self.was_verified = False
instance = self.registry.read(fh, format='format1', into=TestClass,
verify=True)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
# Remove if read-context management is support in the future.
fh.seek(0)
self.was_verified = False
instance = self.registry.read(fh, format='format1', into=TestClass)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
fh.close()
def test_warning_raised(self):
format1 = self.registry.create_format('format1')
fh = StringIO(u'1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
self.was_verified = True
return False, {}
@format1.reader(TestClass)
def reader(fh):
return TestClass([int(x) for x in fh.read().split('\n')])
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
with self.assertRaises(FormatIdentificationWarning):
self.was_verified = False
instance = self.registry.read(fh, format='format1',
into=TestClass, verify=True)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
with self.assertRaises(FormatIdentificationWarning):
self.was_verified = False
instance = self.registry.read(fh, format='format1',
into=TestClass)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
self.assertTrue(self.was_verified)
fh.close()
def test_reader_exists_with_verify_false(self):
format1 = self.registry.create_format('format1')
fh = StringIO(u'1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
self.was_verified = True
return '1' in fh.readline(), {}
@format1.reader(TestClass)
def reader(fh):
return TestClass([int(x) for x in fh.read().split('\n')])
self.was_verified = False
instance = self.registry.read(fh, format='format1', into=TestClass,
verify=False)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
self.assertFalse(self.was_verified)
fh.close()
def test_reader_exists_real_file(self):
format1 = self.registry.create_format('format1')
fp = self.fp1
with open(fp, 'w') as fh:
fh.write('1\n2\n3\n4')
@format1.sniffer()
def sniffer(fh):
return '1' in fh.readline(), {}
@format1.reader(TestClass)
def reader(fh):
return TestClass([int(x) for x in fh.read().split('\n')])
instance = self.registry.read(fp, format='format1', into=TestClass)
self.assertEqual(TestClass([1, 2, 3, 4]), instance)
def test_read_kwargs_passed_generator(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def sniffer(fh):
return True, {'arg1': 15, 'arg2': 'abc'}
@format1.reader(None)
def reader(fh, **kwargs):
self.assertEqual(kwargs['arg1'], 15)
self.assertEqual(kwargs['arg2'], 'abc')
self.assertEqual(kwargs['arg3'], [1])
yield
next(self.registry.read(StringIO(), format='format1', arg3=[1]))
def test_read_kwargs_passed_and_override(self):
format1 = self.registry.create_format('format1')
@format1.sniffer()
def sniffer(fh):
return True, {'arg1': 15, 'arg2': 'abc', 'override': 30}
@format1.reader(TestClass)
def reader(fh, **kwargs):
self.assertEqual(kwargs['arg1'], 15)
self.assertEqual(kwargs['arg2'], 'abc')
self.assertEqual(kwargs['arg3'], [1])
return
self.registry.read(StringIO(u'notempty'), into=TestClass, arg3=[1])
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
# Should raise no warning and thus no error.
self.registry.read(StringIO(u'notempty'), into=TestClass, arg3=[1],
override=30)
# Should raise a warning and thus an error.
with self.assertRaises(ArgumentOverrideWarning):
self.registry.read(StringIO(u'notempty'), into=TestClass,
arg3=[1], override=100)
def test_that_encoding_is_used(self):
format1 = self.registry.create_format('format1')
fp = get_data_path('big5_file')
@format1.sniffer()
def sniffer(fh):
return u'\u4f60' in fh.readline(), {}
@format1.reader(TestClass)
def reader(fh):
self.assertEqual(self._expected_enc, fh.encoding)
return TestClass(fh.readlines())
@format1.reader(None)
def reader_gen(fh):
self.assertEqual(self._expected_enc, fh.encoding)
yield TestClass(fh.readlines())
self._expected_enc = 'big5'
instance = self.registry.read(fp, into=TestClass, encoding='big5')
self.assertEqual(TestClass([u'\u4f60\u597d\n']), instance)
self._expected_enc = 'big5'
gen = self.registry.read(fp, format='format1', encoding='big5')
self.assertEqual(TestClass([u'\u4f60\u597d\n']), next(gen))
def test_non_default_encoding(self):
    """A format-level default encoding is used, and can be overridden per call
    (overriding with the wrong codec fails to decode)."""
    format1 = self.registry.create_format('format1', encoding='big5')
    fp = get_data_path('big5_file')

    @format1.sniffer()
    def sniffer(fh):
        return True, {}

    @format1.reader(TestClass)
    def reader(fh):
        self.assertEqual(self._expected_enc, fh.encoding)
        return TestClass(fh.readlines())

    @format1.reader(None)
    def reader_gen(fh):
        self.assertEqual(self._expected_enc, fh.encoding)
        yield TestClass(fh.readlines())

    # Format default ('big5') applies when no encoding kwarg is given.
    self._expected_enc = 'big5'
    instance = self.registry.read(fp, into=TestClass)
    self.assertEqual(TestClass([u'\u4f60\u597d\n']), instance)

    gen = self.registry.read(fp, format='format1')
    self.assertEqual(TestClass([u'\u4f60\u597d\n']), next(gen))
    gen.close()

    # Explicit utf8 overrides the default and cannot decode the Big5 bytes.
    self._expected_enc = 'utf8'
    with self.assertRaises(UnicodeDecodeError):
        self.registry.read(fp, into=TestClass, encoding='utf8')

    with self.assertRaises(UnicodeDecodeError):
        self.registry.read(fp, format='format1', encoding='utf8')
def test_that_newline_is_used(self):
    """An explicit ``newline`` kwarg controls line splitting for readers."""
    formatx = self.registry.create_format('formatx')
    fp = get_data_path('real_file')

    @formatx.sniffer()
    def sniffer(fh):
        return True, {}

    @formatx.reader(TestClass)
    def reader(fh):
        return TestClass(fh.readlines())

    @formatx.reader(None)
    def reader_gen(fh):
        yield TestClass(fh.readlines())

    # With newline='\r' the '\n'-separated fixture is one single "line".
    instance = self.registry.read(fp, into=TestClass, newline='\r')
    self.assertEqual(instance, TestClass(['a\nb\nc\nd\ne\n']))

    gen = self.registry.read(fp, format='formatx', newline='\r')
    self.assertEqual(next(gen), TestClass(['a\nb\nc\nd\ne\n']))
    gen.close()
def test_non_default_newline(self):
    """A format-level default newline is used, and newline=None (universal
    newlines) can override it per call."""
    formatx = self.registry.create_format('formatx', newline='\r')
    fp = get_data_path('real_file')

    @formatx.sniffer()
    def sniffer(fh):
        return True, {}

    @formatx.reader(TestClass)
    def reader(fh):
        return TestClass(fh.readlines())

    @formatx.reader(None)
    def reader_gen(fh):
        yield TestClass(fh.readlines())

    # Default newline='\r': the whole file reads as one line.
    instance = self.registry.read(fp, into=TestClass)
    self.assertEqual(instance, TestClass(['a\nb\nc\nd\ne\n']))

    gen = self.registry.read(fp, format='formatx')
    self.assertEqual(next(gen), TestClass(['a\nb\nc\nd\ne\n']))
    gen.close()

    # newline=None restores universal-newline splitting.
    instance = self.registry.read(fp, into=TestClass, newline=None)
    self.assertEqual(instance, TestClass(['a\n', 'b\n', 'c\n', 'd\n',
                                          'e\n']))

    gen = self.registry.read(fp, format='formatx', newline=None)
    self.assertEqual(next(gen), TestClass(['a\n', 'b\n', 'c\n', 'd\n',
                                           'e\n']))
    gen.close()
def test_file_sentinel_many(self):
    """FileSentinel reader parameters receive opened handles for the file
    paths passed as kwargs of the same name."""
    format1 = self.registry.create_format('format1')
    extra = get_data_path('real_file')
    extra_2 = get_data_path('real_file_2')
    fh = StringIO(u'1\n2\n3\n4')

    @format1.sniffer()
    def sniffer(fh):
        return '1' in fh.readline(), {}

    @format1.reader(TestClass)
    def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
        # Both sentinel params should have been replaced with open handles.
        self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
        self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
        return TestClass([int(x) for x in fh.read().split('\n')])

    instance = self.registry.read(fh, format='format1', into=TestClass,
                                  extra=extra, extra_2=extra_2)
    self.assertEqual(TestClass([1, 2, 3, 4]), instance)
    fh.close()
def test_file_sentinel_converted_to_none(self):
    """FileSentinel defaults become None when no matching kwarg is given."""
    format1 = self.registry.create_format('format1')
    fh = StringIO(u'1\n2\n3\n4')

    @format1.sniffer()
    def sniffer(fh):
        return '1' in fh.readline(), {}

    @format1.reader(TestClass)
    def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
        self.assertIsNone(extra)
        self.assertIsNone(extra_2)
        return TestClass([int(x) for x in fh.read().split('\n')])

    instance = self.registry.read(fh, format='format1', into=TestClass)
    self.assertEqual(TestClass([1, 2, 3, 4]), instance)
    fh.close()
def test_file_sentinel_pass_none(self):
    """Explicitly passing None for a FileSentinel parameter behaves the same
    as omitting it."""
    format1 = self.registry.create_format('format1')
    fh = StringIO(u'1\n2\n3\n4')

    @format1.sniffer()
    def sniffer(fh):
        return '1' in fh.readline(), {}

    @format1.reader(TestClass)
    def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
        self.assertIsNone(extra)
        self.assertIsNone(extra_2)
        return TestClass([int(x) for x in fh.read().split('\n')])

    instance = self.registry.read(fh, format='format1', into=TestClass,
                                  extra=None)
    self.assertEqual(TestClass([1, 2, 3, 4]), instance)
    fh.close()
def test_file_sentinel_generator_many(self):
    """FileSentinel resolution also works for generator readers."""
    format1 = self.registry.create_format('format1')
    extra = get_data_path('real_file')
    extra_2 = get_data_path('real_file_2')
    fh = StringIO(u'1\n2\n3\n4')

    @format1.sniffer()
    def sniffer(fh):
        return '1' in fh.readline(), {}

    @format1.reader(None)
    def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
        self.assertEqual('a\nb\nc\nd\ne\n', extra.read())
        self.assertEqual('!\n@\n#\n$\n%\nThe realest.\n', extra_2.read())
        yield TestClass([int(x) for x in fh.read().split('\n')])

    gen = self.registry.read(fh, format='format1', extra=extra,
                             extra_2=extra_2)
    self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))
    fh.close()
def test_file_sentinel_converted_to_none_generator(self):
    """Omitted FileSentinel kwargs become None for generator readers too."""
    format1 = self.registry.create_format('format1')
    fh = StringIO(u'1\n2\n3\n4')

    @format1.sniffer()
    def sniffer(fh):
        return '1' in fh.readline(), {}

    @format1.reader(None)
    def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
        self.assertIsNone(extra)
        self.assertIsNone(extra_2)
        yield TestClass([int(x) for x in fh.read().split('\n')])

    gen = self.registry.read(fh, format='format1')
    self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))
    fh.close()
def test_file_sentinel_pass_none_generator(self):
    """Passing extra=None to a generator reader resolves the sentinel to None."""
    format1 = self.registry.create_format('format1')
    fh = StringIO(u'1\n2\n3\n4')

    @format1.sniffer()
    def sniffer(fh):
        return '1' in fh.readline(), {}

    @format1.reader(None)
    def reader(fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
        self.assertIsNone(extra)
        self.assertIsNone(extra_2)
        yield TestClass([int(x) for x in fh.read().split('\n')])

    gen = self.registry.read(fh, format='format1', extra=None)
    self.assertEqual(TestClass([1, 2, 3, 4]), next(gen))
    fh.close()
def test_read_with_illegal_encoding(self):
    """Binary formats are skipped under text encodings and vice versa, and
    mismatched format/encoding combinations raise ValueError."""
    binf = self.registry.create_format('binf', encoding='binary')
    textf = self.registry.create_format('textf', encoding=None)

    @binf.sniffer()
    def binf_sniffer(fh):
        return True, {}

    @binf.reader(TestClass)
    def binf_reader(fh):
        return TestClass(['bin'])

    @binf.reader(None)
    def binf_reader_gen(fh):
        yield TestClass(['bin'])

    @textf.sniffer()
    def textf_sniffer(fh):
        return True, {}

    @textf.reader(TestClass)
    def textf_reader(fh):
        return TestClass(['text'])

    @textf.reader(None)
    def textf_reader_gen(fh):
        yield TestClass(['text'])

    # Should skip binary sniffers
    instance = self.registry.read(self.fp1, encoding=None, into=TestClass)
    self.assertEqual(instance, TestClass(['text']))

    gen = self.registry.read(self.fp1, encoding=None, format='textf')
    self.assertEqual(next(gen), TestClass(['text']))
    gen.close()

    # Should skip text sniffers
    instance = self.registry.read(self.fp1, encoding='binary',
                                  into=TestClass)
    self.assertEqual(instance, TestClass(['bin']))

    gen = self.registry.read(self.fp1, encoding='binary', format='binf')
    self.assertEqual(next(gen), TestClass(['bin']))
    gen.close()

    # A list of text lines cannot be read as binary, regardless of
    # format, verify, or into arguments.
    with self.assertRaises(ValueError):
        self.registry.read([u'some content\n'], encoding='binary',
                           into=TestClass)

    with self.assertRaises(ValueError):
        self.registry.read([u'some content\n'], format='textf',
                           encoding='binary', into=TestClass)

    with self.assertRaises(ValueError):
        self.registry.read([u'some content\n'], format='textf',
                           encoding='binary', verify=False, into=TestClass)

    with self.assertRaises(ValueError):
        self.registry.read([u'some content\n'], format='textf',
                           encoding='binary')

    with self.assertRaises(ValueError):
        self.registry.read([u'some content\n'], format='textf',
                           encoding='binary', verify=False)

    # A binary format cannot be read with a text (None) encoding.
    with self.assertRaises(ValueError):
        self.registry.read(self.fp1, format='binf',
                           encoding=None, into=TestClass)

    with self.assertRaises(ValueError):
        self.registry.read(self.fp1, format='binf',
                           encoding=None, verify=False, into=TestClass)

    with self.assertRaises(ValueError):
        self.registry.read(self.fp1, format='binf',
                           encoding=None)

    with self.assertRaises(ValueError):
        self.registry.read(self.fp1, format='binf',
                           encoding=None, verify=False)
def test_read_with_binary_encoding(self):
    """Binary-format readers receive a buffered binary handle."""
    binf = self.registry.create_format('binf', encoding='binary')

    @binf.reader(TestClass)
    def reader1(fh):
        self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
        return TestClass(['woo'])

    @binf.reader(None)
    def reader2(fh):
        self.assertIsInstance(fh, (io.BufferedReader, io.BufferedRandom))
        yield TestClass(['woo'])

    instance = self.registry.read(self.fp1, format='binf', verify=False,
                                  into=TestClass)
    self.assertEqual(TestClass(['woo']), instance)

    gen = self.registry.read(self.fp1, format='binf', verify=False,
                             into=None)
    self.assertEqual(TestClass(['woo']), next(gen))
    gen.close()
def test_io_kwargs_passed(self):
    """I/O-layer kwargs (e.g. ``errors``) propagate to the reader's handle."""
    format1 = self.registry.create_format('format1')

    @format1.sniffer()
    def sniffer(fh):
        return True, {}

    @format1.reader(TestClass)
    def reader1(fh):
        self.assertEqual(fh.errors, 'replace')
        return TestClass(['woo'])

    @format1.reader(None)
    def reader1_gen(fh):
        self.assertEqual(fh.errors, 'replace')
        yield TestClass(['woo'])

    obj = self.registry.read(self.fp1, into=TestClass, errors='replace')
    self.assertEqual(obj, TestClass(['woo']))

    gen = self.registry.read(self.fp1, format='format1', errors='replace')
    self.assertEqual(next(gen), TestClass(['woo']))
    gen.close()
class TestWrite(RegistryTest):
    """Tests for the registry's write path: writer dispatch, kwargs,
    encoding/newline handling, FileSentinel resolution, and binary output."""

    def test_writer_does_not_exist(self):
        # Unknown formats raise with both the format name and target in the message.
        fh = StringIO()
        with self.assertRaises(UnrecognizedFormatError) as cm:
            self.registry.write({}, format='not_a_format', into=fh)
        self.assertTrue('not_a_format' in str(cm.exception))
        self.assertTrue(str(fh) in str(cm.exception))
        fh.close()

    def test_writer_exists(self):
        format1 = self.registry.create_format('format1')
        obj = TestClass(['1', '2', '3', '4'])
        fh = StringIO()

        @format1.writer(TestClass)
        def writer(obj, fh):
            # Writers for text formats receive a text-mode handle.
            self.assertIsInstance(fh, io.TextIOBase)
            fh.write(u'\n'.join(obj.list))

        self.registry.write(obj, format='format1', into=fh)
        fh.seek(0)
        self.assertEqual("1\n2\n3\n4", fh.read())
        fh.close()

    def test_writer_exists_real_file(self):
        format1 = self.registry.create_format('format1')
        obj = TestClass(['1', '2', '3', '4'])
        fp = self.fp1

        @format1.writer(TestClass)
        def writer(obj, fh):
            self.assertIsInstance(fh, io.TextIOBase)
            fh.write(u'\n'.join(obj.list))

        self.registry.write(obj, format='format1', into=fp)
        with io.open(fp) as fh:
            self.assertEqual(u"1\n2\n3\n4", fh.read())

    def test_writer_passed_kwargs(self):
        # Extra kwargs given to write() reach the writer untouched.
        format1 = self.registry.create_format('format1')

        @format1.reader(None)
        def reader(fh):
            yield

        @format1.writer(None)
        def writer(obj, fh, **kwargs):
            self.assertEqual(kwargs['passed'], True)

        generator = self.registry.get_reader('format1', None)([])
        self.registry.write(generator, format='format1',
                            into=StringIO(), passed=True)

    def test_that_encoding_is_used(self):
        # An explicit encoding kwarg controls the bytes written to disk.
        format1 = self.registry.create_format('format1')
        obj = TestClass([u'\u4f60\u597d\n'])  # Ni Hau
        fp = self.fp1

        @format1.writer(TestClass)
        def writer(obj, fh):
            fh.write(u''.join(obj.list))
            self.assertEqual(self._expected_encoding, fh.encoding)

        self._expected_encoding = 'big5'
        self.registry.write(obj, format='format1', into=fp, encoding='big5')

        with io.open(fp, mode='rb') as fh:
            # This would have been b'\xe4\xbd\xa0\xe5\xa5\xbd\n' in utf8
            self.assertEqual(b'\xa7A\xa6n\n', fh.read())

    def test_non_default_encoding(self):
        # A format-level default encoding is honored and per-call overridable.
        format1 = self.registry.create_format('format1', encoding='big5')
        obj = TestClass([u'\u4f60\u597d\n'])  # Ni Hau
        fp = self.fp1

        @format1.writer(TestClass)
        def writer(obj, fh):
            fh.write(u''.join(obj.list))
            self.assertEqual(self._expected_encoding, fh.encoding)

        self._expected_encoding = 'big5'
        self.registry.write(obj, format='format1', into=fp)
        with io.open(fp, mode='rb') as fh:
            self.assertEqual(b'\xa7A\xa6n\n', fh.read())

        self._expected_encoding = 'utf8'
        self.registry.write(obj, format='format1', into=fp, encoding='utf8')
        with io.open(fp, mode='rb') as fh:
            self.assertEqual(b'\xe4\xbd\xa0\xe5\xa5\xbd\n', fh.read())

    def test_that_newline_is_used(self):
        # newline='\r' translates '\n' to '\r' on output.
        format1 = self.registry.create_format('format1')
        obj = TestClass([u'a\n', u'b\n', u'c\n'])
        fp = self.fp1

        @format1.writer(TestClass)
        def writer(obj, fh):
            fh.write(u''.join(obj.list))

        self.registry.write(obj, format='format1', into=fp, newline='\r')
        with io.open(fp, mode='rb') as fh:
            self.assertEqual(b'a\rb\rc\r', fh.read())

    def test_non_default_newline(self):
        # A format-level default newline is honored and per-call overridable.
        format1 = self.registry.create_format('format1', newline='\r')
        obj = TestClass([u'a\n', u'b\n', u'c\n'])
        fp = self.fp1

        @format1.writer(TestClass)
        def writer(obj, fh):
            fh.write(u''.join(obj.list))

        self.registry.write(obj, format='format1', into=fp)
        with io.open(fp, mode='rb') as fh:
            self.assertEqual(b'a\rb\rc\r', fh.read())

        self.registry.write(obj, format='format1', into=fp, newline='\n')
        with io.open(fp, mode='rb') as fh:
            self.assertEqual(b'a\nb\nc\n', fh.read())

    def test_file_sentinel_many(self):
        # FileSentinel writer params receive writable handles for given paths.
        format1 = self.registry.create_format('format1')
        fh = StringIO()

        @format1.writer(TestClass)
        def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
            extra.write(u'oh yeah...')
            extra_2.write(u'oh no...')

        self.registry.write(TestClass([]), format='format1', into=fh,
                            extra=self.fp1, extra_2=self.fp2)
        with open(self.fp1) as f1:
            self.assertEqual('oh yeah...', f1.read())
        with open(self.fp2) as f2:
            self.assertEqual('oh no...', f2.read())
        fh.close()

    def test_file_sentinel_converted_to_none(self):
        format1 = self.registry.create_format('format1')
        fh = StringIO()

        @format1.writer(TestClass)
        def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)

        self.registry.write(TestClass([]), format='format1', into=fh)
        fh.close()

    def test_file_sentinel_pass_none(self):
        format1 = self.registry.create_format('format1')
        fh = StringIO()

        @format1.writer(TestClass)
        def writer(obj, fh, extra=FileSentinel, other=2, extra_2=FileSentinel):
            self.assertIsNone(extra)
            self.assertIsNone(extra_2)

        self.registry.write(TestClass([]), format='format1', into=fh,
                            extra=None)
        fh.close()

    def test_write_with_illegal_encoding(self):
        # Binary formats cannot be written as text and vice versa.
        binf = self.registry.create_format('binf', encoding='binary')
        textf = self.registry.create_format('textf', encoding=None)

        @binf.writer(TestClass)
        def writer(obj, fh):
            pass

        @textf.writer(TestClass)
        def writer2(obj, fh):
            pass

        with self.assertRaises(ValueError):
            self.registry.write(TestClass([]), into=self.fp1, format='binf',
                                encoding=None)

        with self.assertRaises(ValueError):
            self.registry.write(TestClass([]), into=self.fp1, format='textf',
                                encoding='binary')

    def test_write_binary_format(self):
        # Binary-format writers receive a buffered binary handle.
        format1 = self.registry.create_format('format1', encoding='binary')
        obj = TestClass([b'a\n', b'b\n', b'c\n'])
        fp = self.fp1

        @format1.writer(TestClass)
        def writer(obj, fh):
            self.assertIsInstance(fh, (io.BufferedWriter, io.BufferedRandom))
            fh.write(b''.join(obj.list))

        self.registry.write(obj, format='format1', into=fp)
        with io.open(fp, mode='rb') as fh:
            self.assertEqual(b'a\nb\nc\n', fh.read())

    def test_io_kwargs_passed(self):
        # compression='bz2' applies to both file paths and in-memory targets.
        format1 = self.registry.create_format('format1', encoding='ascii')
        obj = TestClass([u'a\n', u'b\n', u'c\n'])
        fp = self.fp1
        f = io.BytesIO()

        @format1.writer(TestClass)
        def writer(obj, fh):
            iterator = iter(obj.list)
            fh.write(next(iterator))
            fh.flush()  # Flush should be a noop for bz2
            for l in iterator:
                fh.write(l)

        self.registry.write(obj, format='format1', into=fp, compression='bz2')
        self.registry.write(obj, format='format1', into=f, compression='bz2')
        expected = (
            b'BZh91AY&SY\x03\x89\x0c\xa6\x00\x00\x01\xc1\x00\x00\x108\x00 \x00'
            b'!\x9ah3M\x1c\xb7\x8b\xb9"\x9c(H\x01\xc4\x86S\x00')

        with io.open(fp, mode='rb') as fh:
            self.assertEqual(expected, fh.read())

        self.assertEqual(expected, f.getvalue())
class TestMonkeyPatch(RegistryTest):
    """Tests for IORegistry.monkey_patch: attaching read/write methods to
    registered classes, docstring generation, and kwarg forwarding."""

    def setUp(self):
        super(TestMonkeyPatch, self).setUp()

        # Fresh throwaway classes per test so monkey-patching never leaks
        # between tests.
        class UnassumingClass(object):
            pass

        class ClassWithDefault(object):
            # Required by monkey_patch for classes that gain a write method.
            default_write_format = 'favfmt'

        class NoMonkeySee(object):
            pass

        self.unassuming_class = UnassumingClass
        self.class_with_default = ClassWithDefault
        self.no_monkey_see = NoMonkeySee

    def test_no_readers_writers(self):
        # Nothing registered -> nothing is patched on.
        self.registry.monkey_patch()
        self.assertFalse(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))
        self.assertFalse(hasattr(self.class_with_default, 'read'))
        self.assertFalse(hasattr(self.class_with_default, 'write'))

    def test_readers_only(self):
        favfmt = self.registry.create_format('favfmt')
        favfmt2 = self.registry.create_format('favfmt2')

        @favfmt.reader(self.unassuming_class)
        def fvfmt_to_unasumming_class(fh):
            return

        @favfmt.reader(None)
        def fvfmt_to_gen(fh):
            yield

        @favfmt2.reader(self.unassuming_class)
        def fvfmt2_to_unasumming_class(fh):
            return

        self.registry.monkey_patch()

        # Only 'read' is attached, and only to classes with readers.
        self.assertTrue(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))
        self.assertFalse(hasattr(self.class_with_default, 'read'))
        self.assertFalse(hasattr(self.class_with_default, 'write'))

        # Both formats appear in the generated docstring.
        self.assertIn('favfmt', self.unassuming_class.read.__doc__)
        self.assertIn('favfmt2', self.unassuming_class.read.__doc__)

    def test_writers_only(self):
        favfmt = self.registry.create_format('favfmt')
        favfmt2 = self.registry.create_format('favfmt2')

        @favfmt.writer(self.class_with_default)
        def favfmt_writer(fh):
            pass

        @favfmt.writer(None)
        def gen_to_favfmt(fh):
            pass

        @favfmt2.writer(self.class_with_default)
        def favfmt2_writer(fh):
            pass

        self.registry.monkey_patch()

        self.assertFalse(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))
        self.assertFalse(hasattr(self.class_with_default, 'read'))
        self.assertTrue(hasattr(self.class_with_default, 'write'))

        self.assertIn('favfmt', self.class_with_default.write.__doc__)
        self.assertIn('favfmt2', self.class_with_default.write.__doc__)

    def test_writers_no_default_format(self):
        # A writer on a class lacking default_write_format is an error.
        favfmt = self.registry.create_format('favfmt')
        favfmt2 = self.registry.create_format('favfmt2')

        @favfmt.writer(self.unassuming_class)
        def favfmt_writer(fh):
            pass

        @favfmt.writer(None)
        def gen_to_favfmt(fh):
            pass

        @favfmt2.writer(self.unassuming_class)
        def favfmt2_writer(fh):
            pass

        with self.assertRaises(NotImplementedError) as cm:
            self.registry.monkey_patch()

        self.assertIn('default_write_format', str(cm.exception))

    def test_readers_writers(self):
        favfmt = self.registry.create_format('favfmt')
        favfmt2 = self.registry.create_format('favfmt2')

        @favfmt.reader(self.unassuming_class)
        def fvfmt_to_unasumming_class(fh):
            return

        @favfmt.reader(self.class_with_default)
        def fvfmt_to_class_w_default(fh):
            return

        @favfmt.reader(None)
        def fvfmt_to_gen(fh):
            yield

        @favfmt2.reader(self.unassuming_class)
        def fvfmt2_to_unasumming_class(fh):
            return

        @favfmt2.reader(self.class_with_default)
        def fvfmt2_to_class_w_default(fh):
            return

        @favfmt.writer(self.class_with_default)
        def favfmt_writer(fh):
            pass

        @favfmt.writer(None)
        def gen_to_favfmt(fh):
            pass

        @favfmt2.writer(self.class_with_default)
        def favfmt2_writer(fh):
            pass

        # monkey_patch=True/False on registration overrides default patching.
        @favfmt2.reader(self.no_monkey_see, monkey_patch=True)
        def favfmt2_to_monkey(fh):
            pass

        @favfmt2.writer(self.no_monkey_see, monkey_patch=False)
        def monkey_to_favfmt2(fh):
            pass

        self.registry.monkey_patch()

        self.assertTrue(hasattr(self.unassuming_class, 'read'))
        self.assertFalse(hasattr(self.unassuming_class, 'write'))

        self.assertTrue(hasattr(self.class_with_default, 'read'))
        self.assertTrue(hasattr(self.class_with_default, 'write'))

        self.assertTrue(hasattr(self.no_monkey_see, 'read'))
        self.assertFalse(hasattr(self.no_monkey_see, 'write'))

        self.assertIn('favfmt', self.unassuming_class.read.__doc__)
        self.assertIn('favfmt2', self.unassuming_class.read.__doc__)

        self.assertIn('favfmt', self.class_with_default.read.__doc__)
        self.assertIn('favfmt2', self.class_with_default.read.__doc__)

        self.assertIn('favfmt', self.class_with_default.write.__doc__)
        self.assertIn('favfmt2', self.class_with_default.write.__doc__)

        self.assertIn('favfmt2', self.no_monkey_see.read.__doc__)

    def test_read_kwargs_passed(self):
        favfmt = self.registry.create_format('favfmt')
        self.was_called = False

        @favfmt.sniffer()
        def fvfmt_sniffer(fh):
            return True, {}

        @favfmt.reader(self.class_with_default)
        def fvfmt_to_class_w_default(fh, **kwargs):
            self.assertEqual('a', kwargs['a'])
            self.assertEqual(123, kwargs['b'])
            self.was_called = True

        self.registry.monkey_patch()
        fh = StringIO(u'notempty')
        self.class_with_default.read(fh, a='a', b=123)

        self.assertTrue(self.was_called)
        fh.close()

    def test_write_kwargs_passed(self):
        favfmt = self.registry.create_format('favfmt')
        self.was_called = False

        @favfmt.writer(self.class_with_default)
        def favfmt_writer(obj, fh, **kwargs):
            self.assertEqual('a', kwargs['a'])
            self.assertEqual(123, kwargs['b'])
            self.was_called = True

        self.registry.monkey_patch()
        fh = StringIO()
        self.class_with_default().write(fh, a='a', b=123)

        self.assertTrue(self.was_called)
        fh.close()
class TestModuleFunctions(unittest.TestCase):
    """The module-level convenience functions (sniff/read/write/create_format)
    must behave identically to the corresponding io_registry methods."""

    def test_sniff_matches(self):
        exp = io_registry.sniff([u'(a, b);'])
        result = sniff([u'(a, b);'])
        self.assertEqual(exp, result)
        self.assertEqual('newick', exp[0])
        self.assertEqual({}, exp[1])

    def test_read_matches(self):
        input = [u'>\n', u'ACGT\n']
        exp = io_registry.read(input, into=DNA)
        result = read(input, into=DNA)
        self.assertEqual(exp, result)
        self.assertEqual(exp, DNA('ACGT', metadata={u'id': u'',
                                                    u'description': u''}))

    def test_write_matches(self):
        input = DNA('ACGT')
        exp = io_registry.write(input, format='fasta', into=[])
        result = write(input, format='fasta', into=[])
        self.assertEqual(exp, result)
        self.assertEqual(exp, [u'>\n', u'ACGT\n'])

    def test_create_format_matches(self):
        # 'fasta' already exists, so re-creating it must fail either way.
        with self.assertRaises(DuplicateRegistrationError):
            io_registry.create_format('fasta')

        with self.assertRaises(DuplicateRegistrationError):
            create_format('fasta')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""
Forward Simulation of Gradiometry Data on a Tree Mesh
=====================================================
Here we use the module *SimPEG.potential_fields.gravity* to predict gravity
gradiometry data for a synthetic density contrast model. The simulation is
carried out on a tree mesh. For this tutorial, we focus on the following:
- How to define the survey when we want multiple field components
- How to predict gravity gradiometry data for a density contrast model
- How to construct tree meshes based on topography and survey geometry
- The units of the density contrast model and resulting data
"""
#########################################################################
# Import Modules
# --------------
#
import numpy as np
from scipy.interpolate import LinearNDInterpolator
import matplotlib as mpl
import matplotlib.pyplot as plt
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import plot2Ddata, model_builder, surface2ind_topo
from SimPEG import maps
from SimPEG.potential_fields import gravity
# sphinx_gallery_thumbnail_number = 2
#############################################
# Defining Topography
# -------------------
#
# Surface topography is defined as an (N, 3) numpy array. We create it here but
# the topography could also be loaded from a file.
#
# 41 x 41 grid over [-200, 200] m with a Gaussian depression (15 m deep,
# ~80 m width scale) centered at the origin.
[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41))
z_topo = -15 * np.exp(-(x_topo ** 2 + y_topo ** 2) / 80 ** 2)
# Flatten to column vectors and stack into an (N, 3) topography array.
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
xyz_topo = np.c_[x_topo, y_topo, z_topo]
#############################################
# Defining the Survey
# -------------------
#
# Here, we define survey that will be used for the forward simulation. Gravity
# surveys are simple to create. The user only needs an (N, 3) array to define
# the xyz locations of the observation locations, and a list of field components
# which are to be measured.
#
# Define the observation locations as an (N, 3) numpy array or load them
# 17 x 17 grid of stations over [-80, 80] m.
x = np.linspace(-80.0, 80.0, 17)
y = np.linspace(-80.0, 80.0, 17)
x, y = np.meshgrid(x, y)
x, y = mkvc(x.T), mkvc(y.T)
# Drape the stations 5 m above the interpolated topographic surface.
fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo)
z = fun_interp(np.c_[x, y]) + 5
receiver_locations = np.c_[x, y, z]
# Define the component(s) of the field we want to simulate as strings within
# a list. Here we measure the x, y and z components of the gravity anomaly at
# each observation location.
components = ["gxz", "gyz", "gzz"]
# Use the observation locations and components to define the receivers. To
# simulate data, the receivers must be defined as a list.
receiver_list = gravity.receivers.Point(receiver_locations, components=components)
receiver_list = [receiver_list]
# Defining the source field.
source_field = gravity.sources.SourceField(receiver_list=receiver_list)
# Defining the survey
survey = gravity.survey.Survey(source_field)
##########################################################
# Defining an OcTree Mesh
# -----------------------
#
# Here, we create the OcTree mesh that will be used in the forward simulation.
#
dx = 5  # minimum cell width (base mesh cell width) in x
dy = 5  # minimum cell width (base mesh cell width) in y
dz = 5  # minimum cell width (base mesh cell width) in z
x_length = 240.0  # domain width in x
y_length = 240.0  # domain width in y
z_length = 120.0  # domain width in z
# Compute number of base mesh cells required in x and y
# (rounded to the nearest power of 2, as required by a tree mesh).
nbcx = 2 ** int(np.round(np.log(x_length / dx) / np.log(2.0)))
nbcy = 2 ** int(np.round(np.log(y_length / dy) / np.log(2.0)))
nbcz = 2 ** int(np.round(np.log(z_length / dz) / np.log(2.0)))
# Define the base mesh
hx = [(dx, nbcx)]
hy = [(dy, nbcy)]
hz = [(dz, nbcz)]
# "CCN": origin centered in x and y, top of the mesh at z = 0.
mesh = TreeMesh([hx, hy, hz], x0="CCN")
# Refine based on surface topography
mesh = refine_tree_xyz(
    mesh, xyz_topo, octree_levels=[2, 2], method="surface", finalize=False
)
# Refine box based on region of interest
xp, yp, zp = np.meshgrid([-100.0, 100.0], [-100.0, 100.0], [-80.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)]
mesh = refine_tree_xyz(mesh, xyz, octree_levels=[2, 2], method="box", finalize=False)
mesh.finalize()
#######################################################
# Density Contrast Model and Mapping on OcTree Mesh
# -------------------------------------------------
#
# Here, we create the density contrast model that will be used to simulate gravity
# gradiometry data and the mapping from the model to the mesh. The model
# consists of a less dense block and a more dense sphere.
#
# Define density contrast values for each unit in g/cc
# Define density contrast values for each unit in g/cc
background_density = 0.0
block_density = -0.1
sphere_density = 0.1
# Find the indecies for the active mesh cells (e.g. cells below surface)
ind_active = surface2ind_topo(mesh, xyz_topo)
# Define mapping from model to active cells. The model consists of a value for
# each cell below the Earth's surface.
nC = int(ind_active.sum())
model_map = maps.IdentityMap(nP=nC)  # model will be value of active cells
# Define model. Models in SimPEG are vector arrays.
model = background_density * np.ones(nC)
# You could find the indicies of specific cells within the model and change their
# value to add structures.
# Less dense rectangular block: x in (-50, -20), y in (-15, 15), z in (-50, -30).
ind_block = (
    (mesh.gridCC[ind_active, 0] > -50.0)
    & (mesh.gridCC[ind_active, 0] < -20.0)
    & (mesh.gridCC[ind_active, 1] > -15.0)
    & (mesh.gridCC[ind_active, 1] < 15.0)
    & (mesh.gridCC[ind_active, 2] > -50.0)
    & (mesh.gridCC[ind_active, 2] < -30.0)
)
model[ind_block] = block_density
# You can also use SimPEG utilities to add structures to the model more concisely
# Denser sphere: radius 15 m centered at (35, 0, -40).
ind_sphere = model_builder.getIndicesSphere(np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC)
ind_sphere = ind_sphere[ind_active]
model[ind_sphere] = sphere_density
# Plot Density Contrast Model
fig = plt.figure(figsize=(9, 4))
# Inactive (air) cells are plotted as NaN so they render blank.
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78])
mesh.plotSlice(
    plotting_map * model,
    normal="Y",
    ax=ax1,
    ind=int(mesh.hy.size / 2),
    grid=True,
    clim=(np.min(model), np.max(model)),
    pcolorOpts={"cmap": "viridis"},
)
ax1.set_title("Model slice at y = 0 m")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78])
norm = mpl.colors.Normalize(vmin=np.min(model), vmax=np.max(model))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
)
cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)
plt.show()
##############################################################
# Simulation: Gravity Gradiometry Data on an OcTree Mesh
# ------------------------------------------------------
#
# Here we demonstrate how to predict gravity anomaly data using the integral
# formulation.
#
# Define the forward simulation. By setting the 'store_sensitivities' keyword
# argument to "forward_only", we simulate the data without storing the sensitivities
# Define the forward simulation. 'forward_only' avoids storing the (dense)
# sensitivity matrix, which keeps memory usage low for prediction-only runs.
simulation = gravity.simulation.Simulation3DIntegral(
    survey=survey,
    mesh=mesh,
    rhoMap=model_map,
    actInd=ind_active,
    store_sensitivities="forward_only",
)
# Compute predicted data for some model. Data are interleaved per station
# in component order (gxz, gyz, gzz), hence the strided slicing below.
dpred = simulation.dpred(model)
n_data = len(dpred)
# Plot the three gradiometry components side by side with a shared color scale.
fig = plt.figure(figsize=(10, 3))
n_locations = receiver_locations.shape[0]
v_max = np.max(np.abs(dpred))
ax1 = fig.add_axes([0.1, 0.15, 0.25, 0.78])
cplot1 = plot2Ddata(
    receiver_locations,
    dpred[0:n_data:3],  # gxz component
    ax=ax1,
    ncontour=30,
    clim=(-v_max, v_max),
    contourOpts={"cmap": "bwr"},
)
cplot1[0].set_clim((-v_max, v_max))
# FIX: titles are raw strings; '\p' in a plain string is an invalid escape
# sequence (SyntaxWarning on modern Python). The rendered text is unchanged.
ax1.set_title(r"$\partial g /\partial x$")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")
ax2 = fig.add_axes([0.36, 0.15, 0.25, 0.78])
cplot2 = plot2Ddata(
    receiver_locations,
    dpred[1:n_data:3],  # gyz component
    ax=ax2,
    ncontour=30,
    clim=(-v_max, v_max),
    contourOpts={"cmap": "bwr"},
)
cplot2[0].set_clim((-v_max, v_max))
ax2.set_title(r"$\partial g /\partial y$")
ax2.set_xlabel("x (m)")
ax2.set_yticks([])
ax3 = fig.add_axes([0.62, 0.15, 0.25, 0.78])
cplot3 = plot2Ddata(
    receiver_locations,
    dpred[2:n_data:3],  # gzz component
    ax=ax3,
    ncontour=30,
    clim=(-v_max, v_max),
    contourOpts={"cmap": "bwr"},
)
cplot3[0].set_clim((-v_max, v_max))
ax3.set_title(r"$\partial g /\partial z$")
ax3.set_xlabel("x (m)")
ax3.set_yticks([])
ax4 = fig.add_axes([0.89, 0.13, 0.02, 0.79])
norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
cbar = mpl.colorbar.ColorbarBase(
    ax4, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
)
cbar.set_label("$mgal/m$", rotation=270, labelpad=15, size=12)
plt.show()
| |
'''
Dts_TranslucentSort.py
Portions Copyright (c) 2004 - 2006 James Urquhart(j_urquhart@btinternet.com)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Original Code Copyright (C) GarageGames.com, Inc.
'''
# Standard library
import copy  # needed by TranslucentSort.makefrom (copy.copy of FaceInfo records)

# Blender
import bpy
# hold indices and sizes of biggest faces...these are marked as higher priority for splitting with
bigFaces = []
bigFaceSizes = []
# planes in this list are hidden because we are on the other side of them at the time...
noAddNormals = []
def getMinMaxExtents(x, v0, v1, v2):
    """Project the triangle (v0, v1, v2) onto the axis ``x`` and return the
    (minimum, maximum) of the three dot products."""
    projections = (x.dot(v0), x.dot(v1), x.dot(v2))
    return min(projections), max(projections)
class FaceInfo:
    """Per-face bookkeeping for the BSP-style translucent sort: split-plane
    data, parent/child links, and lists of relationships to other faces."""

    def __init__(self):
        self.used = False        # True once this face has been consumed by the sort
        self.priority = -1       # split priority (-1 = unassigned)
        self.parentFace = 0      # index of the face this one was split from
        self.childFace1 = 0      # indices of faces produced by splitting this one
        self.childFace2 = 0
        self.childFace3 = 0
        # NOTE(review): Vector is not imported in this file's visible header —
        # presumably mathutils.Vector; confirm the import exists.
        self.normal = Vector()   # plane normal of this face
        self.k = .0              # plane constant (normal . p = k)
        self.isInFrontOfMe = []  # indices of faces wholly in front of this plane
        self.isBehindMe = []     # indices of faces wholly behind this plane
        self.isCutByMe = []      # indices of faces straddling this plane
        self.isCoPlanarWithMe = []  # indices of coplanar faces

    def __del__(self):
        # Explicitly drop the relationship lists (original author's choice;
        # CPython would reclaim these anyway).
        del self.isInFrontOfMe
        del self.isBehindMe
        del self.isCutByMe
        del self.isCoPlanarWithMe

    def __str__(self):
        # Debug dump of all fields; "(Unused)"/"(Used)" reflects self.used.
        if not self.used:
            retStr = "Face (Unused)"
        else:
            retStr = "Face (Used)"
        retStr += "Priority: %d\n \
parentFace: %d\n \
Child Faces: %d %d %d\n \
Plane : (%f %f %f)/%f \n \
Front Faces: %s\n \
Back Faces : %s\n \
Cut Faces : %s\n \
Co Faces : %s" % (
            self.priority, self.parentFace, self.childFace1, self.childFace2, self.childFace3, self.normal[0],
            self.normal[1], self.normal[2], self.k, self.isInFrontOfMe, self.isBehindMe, self.isCutByMe,
            self.isCoPlanarWithMe)
        return retStr
class TranslucentSort:
    """BSP-style sorter that orders translucent mesh faces back-to-front
    (port of GarageGames C++ tool code)."""

    def __init__(self, faces=None, indices=None, verts=None, norms=None, tverts=None, numBigFaces=0, maxDepth=1,
                 zLayerUp=False, zLayerDown=False):
        """Set up a sorter over the given mesh arrays.

        BUG FIX: the original used mutable default arguments ([]), so every
        sorter constructed without arguments shared (and mutated) the same
        lists. None sentinels preserve the interface while isolating instances.
        """
        global bigFaces, bigFaceSizes
        self.frontClusters = []
        self.backClusters = []
        self.middleCluster = []
        self.splitNormal = Vector()
        self.splitK = .0
        self.mNumBigFaces = numBigFaces
        self.mMaxDepth = maxDepth
        self.mZLayerUp = zLayerUp
        self.mZLayerDown = zLayerDown
        self.currentDepth = 0
        self.frontSort = None
        self.backSort = None
        self.faceInfoList = []
        self.saveFaceInfoList = []
        # The mesh arrays are aliased (not copied): splits append to them in place.
        self.mFaces = faces if faces is not None else []
        self.mIndices = indices if indices is not None else []
        self.mVerts = verts if verts is not None else []
        self.mNorms = norms if norms is not None else []
        self.mTVerts = tverts if tverts is not None else []
        self.initFaces()
def makefrom(self):
newSort = TranslucentSort(self.mFaces, self.mIndices, self.mVerts, self.mNorms, self.mTVerts)
# ^^ NOTE: Not sure if the above is safe
newSort.faceInfoList = [None] * len(self.faceInfoList)
for i in range(0, len(newSort.faceInfoList)):
newSort.faceInfoList[i] = copy.copy(self.faceInfoList[i])
newSort.currentDepth = self.currentDepth + 1
newSort.mMaxDepth = self.mMaxDepth
newSort.mZLayerUp = self.mZLayerUp
newSort.mZLayerDown = self.mZLayerDown
newSort.mNumBigFaces = 0 # never used...
return newSort
    def __del__(self):
        # Drop child sorters and clear the helper arrays on destruction.
        # NOTE(review): clearArray presumably comes from the Torque_Util wildcard
        # import at the bottom of this file -- confirm.
        del self.frontSort
        del self.backSort
        clearArray(self.faceInfoList)
        clearArray(self.saveFaceInfoList)
        clearArray(self.frontClusters)
        clearArray(self.backClusters)
def initFaces(self):
self.faceInfoList = [None] * len(self.mFaces)
for i in range(0, len(self.faceInfoList)):
self.faceInfoList[i] = FaceInfo()
self.faceInfoList[i].used = False
for i in range(0, len(self.mFaces)):
self.initFaceInfo(self.mFaces[i], self.faceInfoList[i])
self.setFaceInfo(self.mFaces[i], self.faceInfoList[i])
    def initFaceInfo(self, face, faceInfo, setPriority=True):
        """Compute the plane (normal, k) of `face` into `faceInfo`; when
        setPriority is set, rank the face in the global big-face lists so that
        large faces are preferred as split planes."""
        global bigFaces, bigFaceSizes
        faceInfo.used = False
        faceInfo.parentFace = -1
        faceInfo.childFace1 = -1
        faceInfo.childFace2 = -1
        faceInfo.childFace3 = -1
        # get normal and plane constant
        idx0 = self.mIndices[face.firstElement + 0]
        idx1 = self.mIndices[face.firstElement + 1]
        idx2 = self.mIndices[face.firstElement + 2]
        vert0 = self.mVerts[idx0]
        vert1 = self.mVerts[idx1]
        vert2 = self.mVerts[idx2]
        # compute normal using largest gap...
        edge01 = vert1 - vert0
        edge12 = vert2 - vert1
        edge20 = vert0 - vert2
        # Cross the two edges that avoid the longest one (numerical stability).
        if edge01.dot(edge01) >= edge12.dot(edge12) and edge01.dot(edge01) >= edge20.dot(edge20):
            # edge01 biggest gap
            faceInfo.normal = edge12.cross(edge20) * -1.0
        elif edge12.dot(edge12) >= edge20.dot(edge20) and edge12.dot(edge12) >= edge01.dot(edge01):
            # edge12 biggest gap
            faceInfo.normal = edge20.cross(edge01) * -1.0
        else:
            # edge20 biggest gap
            faceInfo.normal = edge01.cross(edge12) * -1.0
        # NOTE(review): assumes Vector.normalize() returns the normalized vector
        # (old Blender mathutils API); newer APIs normalize in place -- confirm.
        faceInfo.normal = faceInfo.normal.normalize()
        faceInfo.k = faceInfo.normal.dot(vert0)
        if setPriority:
            faceInfo.priority = 0
            # Face "size" = squared length of its longest edge.
            maxExtent = edge01.dot(edge01)
            if maxExtent < edge12.dot(edge12):
                maxExtent = edge12.dot(edge12)
            if maxExtent < edge20.dot(edge20):
                maxExtent = edge20.dot(edge20)
            for i in range(0, self.mNumBigFaces):
                if i == len(bigFaceSizes) or maxExtent > bigFaceSizes[i]:
                    bigFaceSizes.insert(i, maxExtent)
                    # Linear search for this face's index in mFaces.
                    count = 0
                    for f in self.mFaces:
                        if f == face:
                            faceIdx = count
                            break
                        count += 1
                    bigFaces.insert(i, faceIdx)
                    # Re-assign priorities to entries shifted by the insertion.
                    while i < len(bigFaceSizes):
                        if i < self.mNumBigFaces:
                            self.faceInfoList[bigFaces[i]].priority = self.mNumBigFaces - i
                        else:
                            self.faceInfoList[bigFaces[i]].priority = 0
                        i += 1
                    # Trim the big-face lists back to capacity.
                    while len(bigFaceSizes) > self.mNumBigFaces:
                        del bigFaceSizes[-1]
                        del bigFaces[-1]
                    break
    def setFaceInfo(self, face, faceInfo):
        """Classify every other live face as in-front / behind / cut / coplanar
        relative to this face's plane, filling the four relation arrays."""
        faceInfo.isInFrontOfMe = [False] * len(self.mFaces)
        faceInfo.isBehindMe = [False] * len(self.mFaces)
        faceInfo.isCutByMe = [False] * len(self.mFaces)
        faceInfo.isCoPlanarWithMe = [False] * len(self.mFaces)
        normal = faceInfo.normal
        k = faceInfo.k
        # Locate our own index. NOTE(review): myIndex is left unbound if `face`
        # is not in mFaces -- callers must guarantee membership.
        count = 0
        for f in self.mFaces:
            if f == face:
                myIndex = count
                break
            count += 1
        for i in range(0, len(self.mFaces)):
            if i == myIndex or self.faceInfoList[i].used:
                continue
            otherFace = self.mFaces[i]
            idx0 = self.mIndices[otherFace.firstElement + 0]
            idx1 = self.mIndices[otherFace.firstElement + 1]
            idx2 = self.mIndices[otherFace.firstElement + 2]
            v0 = self.mVerts[idx0]
            v1 = self.mVerts[idx1]
            v2 = self.mVerts[idx2]
            # Test each vertex against the plane with an epsilon band.
            hasFrontVert, hasBackVert = False, False
            if normal.dot(v0) > k + PlaneF.EPSILON:
                hasFrontVert = True
            elif normal.dot(v0) < k - PlaneF.EPSILON:
                hasBackVert = True
            if normal.dot(v1) > k + PlaneF.EPSILON:
                hasFrontVert = True
            elif normal.dot(v1) < k - PlaneF.EPSILON:
                hasBackVert = True
            if normal.dot(v2) > k + PlaneF.EPSILON:
                hasFrontVert = True
            elif normal.dot(v2) < k - PlaneF.EPSILON:
                hasBackVert = True
            if hasFrontVert and not hasBackVert:
                faceInfo.isInFrontOfMe[i] = True
            elif not hasFrontVert and hasBackVert:
                faceInfo.isBehindMe[i] = True
            elif hasFrontVert and hasBackVert:
                faceInfo.isCutByMe[i] = True
            elif not hasFrontVert and not hasBackVert:
                faceInfo.isCoPlanarWithMe[i] = True
def clearFaces(self, removeThese):
i = 0
for faceInfo in self.faceInfoList:
faceInfo.isInFrontOfMe = subtractSet(faceInfo.isInFrontOfMe, removeThese)
faceInfo.isBehindMe = subtractSet(faceInfo.isBehindMe, removeThese)
faceInfo.isCutByMe = subtractSet(faceInfo.isCutByMe, removeThese)
faceInfo.isCoPlanarWithMe = subtractSet(faceInfo.isCoPlanarWithMe, removeThese)
if removeThese[i]:
faceInfo.used = True
i += 1
def saveFaceInfo(self):
while len(self.saveFaceInfoList) != 0:
del self.saveFaceInfoList[0]
self.saveFaceInfoList = [None] * len(self.faceInfoList)
for i in range(0, len(self.saveFaceInfoList)):
self.saveFaceInfoList[i] = copy.copy(self.faceInfoList[i])
def restoreFaceInfo(self):
for i in range(0, len(saveFaceInfo)):
self.faceInfoList[i] = self.saveFaceInfoList[i]
    def addFaces(self, addClusters, faces, indices, continueLast=False):
        """Emit the faces flagged in `addClusters` (a bool array, or a nested
        list of such arrays) into `faces`/`indices`, grouping consecutive faces
        of the same material into one Primitive. When `continueLast` is set the
        first faces may extend the caller's last primitive."""
        global noAddNormals
        startFaces = len(faces)
        for c in addClusters:
            if type(c) == list:
                # Nested cluster list: recurse; only continue the previous
                # primitive if nothing has been emitted yet at this level.
                if startFaces != len(faces):
                    nVal = False
                else:
                    nVal = continueLast
                # NOTE(review): addFaces returns None, so changedFaces is
                # always None and never read -- suspected dead code.
                changedFaces = self.addFaces(c, faces, indices, nVal)
            else:
                # Flat bool-array cluster: repeatedly sweep all faces, emitting
                # runs that share the current primitive's material.
                toAdd = addClusters
                startNewFace = not continueLast or len(faces) == 0
                while allSet(toAdd):
                    for i in range(0, len(self.mFaces)):
                        if not startNewFace and faces[-1].matindex != self.mFaces[i].matindex:
                            continue
                        if not toAdd[i]:
                            continue
                        # Drop faces suppressed by the noAddNormals optimization
                        # (viewer known to be on the far side of that plane).
                        for k in range(0, len(noAddNormals)):
                            if noAddNormals[k].dot(self.faceInfoList[i].normal) > .99:
                                toAdd[i] = False
                        if not toAdd[i]:
                            continue
                        # add this face...
                        if startNewFace:
                            faces.append(Primitive(len(indices), 0, self.mFaces[i].matindex))
                            startNewFace = False
                        faces[-1].numElements += 3
                        indices.append(self.mIndices[self.mFaces[i].firstElement + 0])
                        indices.append(self.mIndices[self.mFaces[i].firstElement + 1])
                        indices.append(self.mIndices[self.mFaces[i].firstElement + 2])
                        toAdd[i] = False
                    # Next sweep starts a fresh primitive (new material run).
                    startNewFace = True
def addOrderedFaces(self, orderedCluster, faces, indices, continueLast=False):
global noAddNormals
toAdd = orderedCluster
startNewFace = not continueLast or len(faces) == 0
while len(toAdd) != 0:
i = 0
while i < len(toAdd):
k = 0
while k < len(noAddNormals):
if noAddNormals[k].dot(self.faceInfoList[toAdd[i]].normal) > .99:
del toAdd[i]
i -= 1
break
k += 1
if k != len(noAddNormals):
continue
if not startNewFace and self.mFaces[toAdd[i]].matindex != faces[-1].matindex:
continue
if startNewFace:
faces.append(Primitive(len(indices), 0, self.mFaces[toAdd[i]].matindex))
startNewFace = False
faces[-1].numElements += 3
indices.append(self.mIndices[self.mFaces[toAdd[i]].firstElement + 0])
indices.append(self.mIndices[self.mFaces[toAdd[i]].firstElement + 1])
indices.append(self.mIndices[self.mFaces[toAdd[i]].firstElement + 2])
del toAdd[i]
i -= 1
startNewFace = True
def splitFace(self, faceIndex, normal, k):
idx0 = self.mIndices[self.mFaces[faceIndex].firstElement + 0]
idx1 = self.mIndices[self.mFaces[faceIndex].firstElement + 1]
idx2 = self.mIndices[self.mFaces[faceIndex].firstElement + 2]
v0 = self.mVerts[idx0]
v1 = self.mVerts[idx1]
v2 = self.mVerts[idx2]
k0 = normal.dot(v0)
k1 = normal.dot(v1)
k2 = normal.dot(v2)
# if v0, v1, or v2 is on the plane defined by normal and k, call special case routine
if math.fabs(k0 - k) < epsilon or math.fabs(k1 - k) < epsilon or math.fabs(k2 - k) < epsilon:
self.splitFace2(faceIndex, normal, k)
return
# find the odd man out (the vertex alone on his side of the plane)
code, rogue = 0, 0
if k0 < k:
code |= 1
if k1 < k:
code |= 2
if k2 < k:
code |= 4
if code == 1 or code == 6:
rogue = 0
elif code == 2 or code == 5:
rogue = 1
elif code == 4 or code == 3:
rogue = 2
elif code == 0 or code == 7:
return # shouldn't happen...
# re-order verts so that rogue vert is first vert
idx0 = self.mIndices[self.mFaces[faceIndex].firstElement + ((rogue + 0) % 3)]
idx1 = self.mIndices[self.mFaces[faceIndex].firstElement + ((rogue + 1) % 3)]
idx2 = self.mIndices[self.mFaces[faceIndex].firstElement + ((rogue + 2) % 3)]
v0 = self.mVerts[idx0]
v1 = self.mVerts[idx1]
v2 = self.mVerts[idx2]
k0 = normal.dot(v0)
k1 = normal.dot(v1)
k2 = normal.dot(v2)
tv0 = self.mTVerts[idx0]
tv1 = self.mTVerts[idx1]
tv2 = self.mTVerts[idx2]
n0 = self.mNorms[idx0]
n1 = self.mNorms[idx1]
n2 = self.mNorms[idx2]
# find intersection of edges and plane
a01 = (k - k0) / (k1 - k0)
a02 = (k - k0) / (k2 - k0)
v01 = v1 - v0
v01 *= a01
v01 += v0
tv01 = tv1 - tv0
tv01 *= a01
tv01 += tv0
v02 = v2 - v0
v02 *= a02
v02 += v0
tv02 = tv2 - tv0
tv02 *= a02
tv02 += tv0
# interpolate the normals too (we'll just linearly interpolate...perhaps slerp if later)
n01 = n1 - n0
n01 *= a01
n01 += n0
n01.normalize()
n02 = n2 - n0
n02 *= a02
n02 += n0
n02.normalize()
# add two new verst
idx01 = len(self.mVerts)
self.mVerts.append(v01)
self.mNorms.append(n01)
self.mTVerts.append(tv01)
idx02 = len(self.mVerts)
self.mVerts.append(v02)
self.mNorms.append(n02)
self.mTVerts.append(tv02)
# add three faces :
# add "rogue" face
self.mFaces.append(Primitive(len(self.mIndices), 3, self.mFaces[faceIndex].matindex))
self.mIndices.append(idx0)
self.mIndices.append(idx01)
self.mIndices.append(idx02)
# add idx01, idx1, idx02
self.mFaces.append(Primitive(len(self.mIndices), 3, self.mFaces[faceIndex].matindex))
self.mIndices.append(idx01)
self.mIndices.append(idx1)
self.mIndices.append(idx02)
# add idx2, idx02, idx01
self.mFaces.append(Primitive(len(self.mIndices), 3, self.mFaces[faceIndex].matindex))
self.mIndices.append(idx2)
self.mIndices.append(idx02)
self.mIndices.append(idx1)
# finally, set faceInfo
numFaces = len(self.mFaces)
self.faceInfoList.append(FaceInfo())
self.faceInfoList.append(FaceInfo())
self.faceInfoList.append(FaceInfo())
self.faceInfoList[faceIndex].used = True
self.faceInfoList[faceIndex].childFace1 = nuself.mFaces - 3
self.faceInfoList[faceIndex].childFace2 = nuself.mFaces - 2
self.faceInfoList[faceIndex].childFace3 = nuself.mFaces - 1
self.initFaceInfo(self.mFaces[numFaces - 3], self.faceInfoList[numFaces - 3], False)
self.initFaceInfo(self.mFaces[numFaces - 2], self.faceInfoList[numFaces - 2], False)
self.initFaceInfo(self.mFaces[numFaces - 1], self.faceInfoList[numFaces - 1], False)
self.faceInfoList[numFaces - 3].priority = self.faceInfoList[faceIndex].priority
self.faceInfoList[numFaces - 2].priority = self.faceInfoList[faceIndex].priority
self.faceInfoList[numFaces - 1].priority = self.faceInfoList[faceIndex].priority
self.faceInfoList[numFaces - 3].parentFace = faceIndex
self.faceInfoList[numFaces - 2].parentFace = faceIndex
self.faceInfoList[numFaces - 1].parentFace = faceIndex
def splitFace2(self, faceIndex, normal, k):
idx0 = self.mIndices[self.mFaces[faceIndex].firstElement + 0]
idx1 = self.mIndices[self.mFaces[faceIndex].firstElement + 1]
idx2 = self.mIndices[self.mFaces[faceIndex].firstElement + 2]
v0 = self.mVerts[idx0]
v1 = self.mVerts[idx1]
v2 = self.mVerts[idx2]
k0 = normal.dot(v0)
k1 = normal.dot(v1)
k2 = normal.dot(v2)
# make sure we got here legitimately
if math.fabs(k0 - k) >= PlaneF.EPSILON and math.fabs(k1 - k) >= PlaneF.EPSILON and math.fabs(
k2 - k) >= PlaneF.EPSILON:
print("TODO: ASSERT")
# find the odd man out (the vertex that is on the plane)
rogue
if math.fabs(k0 - k) < PlaneF.EPSILON:
rogue = 0
elif math.fabs(k1 - k) < PlaneF.EPSILON:
rogue = 1
elif math.fabs(k2 - k) < PlaneF.EPSILON:
rogue = 2
else:
print("TODO: ASSERT")
# re-order verts so that rogue vert is first vert
idx0 = self.mIndices[self.mFaces[faceIndex].firstElement + ((rogue + 0) % 3)]
idx1 = self.mIndices[self.mFaces[faceIndex].firstElement + ((rogue + 1) % 3)]
idx2 = self.mIndices[self.mFaces[faceIndex].firstElement + ((rogue + 2) % 3)]
v0 = self.mVerts[idx0]
v1 = self.mVerts[idx1]
v2 = self.mVerts[idx2]
k0 = normal.dot(v0)
k1 = normal.dot(v1)
k2 = normal.dot(v2)
tv0 = self.mTVerts[idx0]
tv1 = self.mTVerts[idx1]
tv2 = self.mTVerts[idx2]
n0 = self.mNorms[idx0]
n1 = self.mNorms[idx1]
n2 = self.mNorms[idx2]
# find intersection of edges and plane
a12 = (k - k1) / (k2 - k1)
v12 = v2 - v1
v12 *= a12
v12 += v1
tv12 = tv2 - tv1
tv12 *= a12
tv12 += tv1
# interpolate the normals too (we'll just linearly interpolate..perhaps slerp if later)
n12 = n2 - n1
n12 *= a12
n12 += n1
n12.normalize()
# add new vert
idx12 = len(self.mVerts)
self.mVerts.append(v12)
self.mNorms.append(n12)
self.mTVerts.append(tv12)
# add two faces:
self.mFaces.append(Primitive(len(self.mIndices), 3, self.mFaces[faceIndex].matindex))
# add idx0, idx2, idx12
self.mIndices.append(idx0)
self.mIndices.append(idx2)
self.mIndices.append(idx12)
self.mFaces.append(Primitive(len(self.mIndices), 3, self.mFaces[faceIndex].matindex))
# add idx0, idx12, idx1
self.mIndices.append(idx0)
self.mIndices.append(idx12)
self.mIndices.append(idx1)
# finally, set faceInfo
numFaces = len(self.mFaces)
self.faceInfoList.append(FaceInfo())
self.faceInfoList.append(FaceInfo())
self.faceInfoList[faceIndex].used = True
self.faceInfoList[faceIndex].childFace1 = numFaces - 2
self.faceInfoList[faceIndex].childFace2 = numFaces - 1
self.faceInfoList[faceIndex].childFace3 = -1
self.initFaceInfo(self.mFaces[numFaces - 2], self.faceInfoList[numFaces - 2], False)
self.initFaceInfo(self.mFaces[numFaces - 1], self.faceInfoList[numFaces - 1], False)
self.faceInfoList[numFaces - 2].priority = self.faceInfoList[faceIndex].priority
self.faceInfoList[numFaces - 1].priority = self.faceInfoList[faceIndex].priority
self.faceInfoList[numFaces - 2].parentFace = faceIndex
self.faceInfoList[numFaces - 1].parentFace = faceIndex
def sort(self):
i = 0
while i != len(self.faceInfoList):
if not self.faceInfoList[i].used:
break
i += 1
if i == len(self.faceInfoList):
return # no unused faces...
while 1:
# 1. select faces with no one behind them -- these guys get drawn first
self.backClusters.append([False] * len(self.mFaces))
for i in range(0, len(self.faceInfoList)):
if not self.faceInfoList[i].used and not allSet(self.faceInfoList[i].isBehindMe) and not allSet(
self.faceInfoList[i].isCutByMe):
self.backClusters[-1][i] = True
self.faceInfoList[i].used = True # select as used so we don't grab it below
# 2. select faces with no one in front of them -- these guys get drawn last
self.frontClusters.insert(0, [False] * len(self.mFaces))
for i in range(0, len(self.faceInfoList)):
if not self.faceInfoList[i].used and not allSet(self.faceInfoList[i].isInFrontOfMe) and not allSet(
self.faceInfoList[i].isCutByMe):
self.frontClusters[0][i] = True
self.faceInfoList[i].used = True # this won't have any effect, but it's here to parallel above
# 3. clear above faces and repeat 1&2 until no more faces found in either step
removeThese = overlapSet(self.backClusters[-1], self.frontClusters[0])
if not allSet(removeThese):
# didn't remove anything
break
self.clearFaces(removeThese)
# 4. pick face cutting fewest other faces and resulting in most balanced split, call this cutFace
fewestCuts = 0
balance = 0
priority = 0
cutFace = -1
for i in range(0, len(self.mFaces)):
if self.faceInfoList[i].used:
continue
cut, front, back = 0, 0, 0
for j in range(0, len(self.mFaces)):
if self.faceInfoList[j].used:
continue
if self.faceInfoList[i].isCutByMe[j]:
cut += 1
if self.faceInfoList[i].isInFrontOfMe[j]:
front += 1
if self.faceInfoList[i].isBehindMe[j]:
back += 1
if cutFace != -1:
if self.faceInfoList[i].priority < priority:
continue
if self.faceInfoList[i].priority == priority:
if (cut > fewestCuts) or (cut == fewestCuts and math.fabs(front - back) >= balance):
continue
# if we get this far, this is our new cutFace
cutFace = i
fewestCuts = cut
priority = faceInfoList[i].priority
balance = math.fabs(front - back)
if cutFace >= 0 and self.currentDepth < self.mMaxDepth:
# 5. cut all faces cut by cutFace
if allSet(self.faceInfoList[cutFace].isCutByMe):
startSize = len(mFaces) # won't need to split beyond here, even though more faces added
for i in range(0, startSize):
if not self.faceInfoList[i].used and self.faceInfoList[cutFace].isCutByMe[i]:
self.splitFace(i, self.faceInfoList[cutFace].normal, self.faceInfoList[cutFace].k)
# may be new faces and some old faces may have been disabled, recompute face info
for i in range(0, len(self.mFaces)):
if not self.faceInfoList[i].used:
self.setFaceInfo(self.mFaces[i], self.faceInfoList[i])
startNumFaces = len(self.mFaces)
disableSet = [False] * len(self.mFaces)
# 6. branch into two orders depending on which side of cutFace camera is, perform translucent sort on each
# back
self.backSort = self.makefrom()
for i in range(0, len(self.mFaces)):
if self.backSort.faceInfoList[i].used or self.backSort.faceInfoList[cutFace].isBehindMe[i]:
continue
if self.backSort.faceInfoList[cutFace].isCutByMe[i]:
print("TODO: Assert") # doh, perform hard assert :(...
if self.backSort.faceInfoList[cutFace].isCoplanarWithMe[i] or cutFace == i:
if self.backSort.faceInfoList[cutFace].normal.dot(backsort.faceInfoList[i].normal) > 0.0:
continue
elif not self.backSort.faceInfoList[cutFace].isInFrontOfMe[i] and cutFace != i:
print("TODO: Assert")
disableSet[i] = True
if not allSet(disableSet):
print("TODO: Assert")
self.backSort.clearFaces(disableSet)
self.backSort.sort()
if backSort.backSort == None and backSort.frontSort == None and len(backSort.frontClusters) == 0 and len(
backSort.backClusters) == 0:
# empty, no reason to keep backSort
del self.backSort
self.backSort = None
# create faceInfo entry for any faces that got added (set used=True)
self.faceInfoList = [None] * (len(self.faceInfoList) - len(self.mFaces))
for i in range(startNumFaces, len(self.faceInfoList)):
self.faceInfoList[i] = FaceInfo()
self.faceInfoList[i].used = True
# front
self.frontSort = self.makefrom()
disableSet = [False] * len(self.mFaces)
for i in range(0, len(self.mFaces)):
if self.frontSort.faceInfoList[i].used or self.frontSort.faceInfoList[cutFace].isInFrontOfMe[i]:
continue
if self.frontSort.faceInfoList[cutFace].isCutByMe[i]:
print("TODO: Assert") # doh, perform hard assert...
if self.frontSort.faceInfoList[cutFace].isCoplanarWithMe[i] or cutFace == i:
if frontSort.faceInfoList[cutFace].normal.dot(frontSort.faceInfoList[i].normal) > 0.0:
continue
elif not frontSort.faceInfoList[cutFace].isBehindMe[i] and i != cutFace:
print("TODO: Assert")
disableSet[i] = True
if not allSet(disableSet):
print("TODO: Assert")
self.frontSort.clearFaces(disableSet)
self.frontSort.sort()
if self.frontSort.backSort == None and self.frontSort.frontSort == None and len(
self.frontSort.frontClusters == 0) and len(self.frontSort.backClusters == 0):
# empty, no reason to keep backSort
del self.backSort
self.backSort = None
# setup cut plane
self.splitNormal = self.faceInfoList[cutFace].normal
self.splitK = self.faceInfoList[cutFace].k
elif cutFace >= 0:
# we've gotten too deep, just dump the remaing faces -- but dump in best order we can
if mZLayerUp:
self.middleCluster = self.layerSort(True)
elif mZLayerDown:
self.middleCluster = self.layerSort(False)
else:
self.middleCluster = self.copeSort()
# routines for sorting faces when there is no perfect solution for all cases
def copeSort(self):
frontOrderedCluster, backOrderedCluster, cluster = [], [], []
# restore after following loop
self.saveFaceInfo()
while 1:
bestFace = -1
bestCount = 0x7FFFFFFF
front = False
# we need to find face with fewest polys behind or in front (cut implies both)
for i in range(0, len(self.faceInfoList)):
if self.faceInfoList[i].used:
continue
frontCount = 0
backCount = 0
for j in range(0, len(self.faceInfoList)):
if self.faceInfoList[j].used:
continue
if self.faceInfoList[i].isInFrontOfMe[j]:
frontCount += 1
elif self.faceInfoList[i].isBehindMe[j]:
backCount += 1
elif self.faceInfoList[i].isCutByMe[j]:
frontCount += 1
backCount += 1
if backCount < bestCount or bestFace < 0:
bestCount = backCount
bestFace = i
front = false
if frontCount == 0 and frontCount < bestCount:
bestCount = frontCount
bestFace = i
front = true
if bestFace != -1:
if front:
frontOrderedCluster.insert(0, bestFace)
else:
backOrderedCluster.append(bestFace)
self.clearFaces([True] * len(self.mFaces))
else:
break
cluster = backOrderedCluster + frontOrderedCluster
# we need face info back...
self.restoreFaceInfo()
# we have a good ordering...but see if we can make some local optimizations
i = 0
while i < len(cluster):
face1 = cluster[i]
faceInfo1 = self.faceInfoList[face1]
for j in range(i + 1, len(cluster)):
face2 = cluster[j]
faceInfo2 = self.faceInfoList[face2]
if (faceInfo1.isBehindMe[face2] and faceInfo2.isInFrontOfMe[face1]) or (
faceInfo1.isCutByMe[face2] and faceInfo2.isInFrontOfMe[face1]) or (
faceInfo1.isBehindMe[face2] and faceInfo2.isCutByMe[face1]):
# these two guys should be switched...now check to see if we can do it
k = i
while k < j:
k += 1
face12 = cluster[k]
faceInfo12 = self.faceInfoList[face12]
# Currently, face1 precedes face12 in the list...under what conditions is it ok
# to have face1 follow face12? Answer: face12 behind face1, or face1 in front of face12.
# Similarly, face12 precedes face2...
if (faceInfo1.isBehindMe[face12] or faceInfo12.isInFrontOfMe[face1]) and (
faceInfo12.isBehindMe[face2] or faceInfo2.isInFrontOfMe[face12]):
continue
break
if k == j:
# switch has been approved...
cluster[i] = face2
cluster[j] = face1
i -= 1
break # TODO: do we need to make sure no infinite loop occurs?
i += 1
def layerSort(self, upFirst):
# sort up-pointing faces from bottom to top and down-pointing faces from top to bottom
upCluster, downCluster, cluster = [], [], []
upZ, downZ = [], []
# go through each face, decide which list to add it to and where
for i in range(0, len(self.faceInfoList)):
if self.faceInfoList[i].used:
continue
face = self.mFaces[i]
idx0 = self.mIndices[face.firstElement + 0]
idx1 = self.mIndices[face.firstElement + 1]
idx2 = self.mIndices[face.firstElement + 2]
v0 = self.mVerts[idx0]
v1 = self.mVerts[idx1]
v2 = self.mVerts[idx2]
# find smallest z
if v0[2] < v1[2]:
smallZ = v0[2]
else:
smallZ = v1[2]
if smallZ < v2[2]:
smallZ = smallZ
else:
smallZ = v2[2]
# find largest z
if v0[2] > v1[2]:
bigZ = v0[2]
else:
bigZ = v1[2]
if smallZ > v2[2]:
bigZ = bigZ
else:
bigZ = v2[2]
if pointUp:
sortBy = smallZ
else:
sortBy = bigZ
if faceInfoList[i].normal[2] > 0.0:
# we face up
if len(upCluster) == 0:
upCluster.append(i)
upZ.append(sortBy)
else:
# keep sorted in order of increasing z (so bottom faces are first)
j = 0
while j < len(upZ):
if sortBy < upZ[j]:
break
j += 1
upZ.insert(j, sortBy)
upCluster.insert(j, i)
else:
# we face down
if len(downCluster) == 0:
downCluster.append(i)
downZ.append(sortBy)
else:
# keep sorted in order of decreasing z (so top faces are first)
j = 0
while j < len(downZ):
if sortBy > downZ[j]:
break
j += 1
downZ.insert(j, sortBy)
downCluster.insert(j, i)
if pointUp:
cluster = upCluster + downCluster
else:
cluster = downCluster + upCluster
# these are for debugging
def anyInFrontOfPlane(self, normal, k):
# make sure no face in use is behind plane
for i in range(0, len(self.mFaces)):
if self.faceInfoList[i].used:
continue
idx0 = self.mIndices[self.mFaces[i].firstElement + 0]
idx1 = self.mIndices[self.mFaces[i].firstElement + 1]
idx2 = self.mIndices[self.mFaces[i].firstElement + 2]
if normal.dot(self.mVerts[idx0]) > k + PlaneF.EPSILON:
return True
if normal.dot(self.mVerts[idx1]) > k + PlaneF.EPSILON:
return True
if normal.dot(self.mVerts[idx2]) > k + PlaneF.EPSILON:
return True
return False
def anyBehindPlane(self, normal, k):
# make sure no face in use is behind plane
for i in range(0, len(self.mFaces)):
if self.faceInfoList[i].used:
continue
idx0 = self.mIndices[self.mFaces[i].firstElement + 0]
idx1 = self.mIndices[self.mFaces[i].firstElement + 1]
idx2 = self.mIndices[self.mFaces[i].firstElement + 2]
if normal.dot(self.mVerts[idx0]) < k - PlaneF.EPSILON:
return True
if normal.dot(self.mVerts[idx1]) < k - PlaneF.EPSILON:
return True
if normal.dot(self.mVerts[idx2]) < k - PlaneF.EPSILON:
return True
return False
#
def generateClusters(self, clusters, faces, indices, retIndex=-1):
global noAddNormals
idx = len(clusters)
clusters = [Cluster(), Cluster()]
# add back faces
clusters[idx].startPrimitive = len(faces)
self.addFaces(self.backClusters, faces, indices)
clusters[idx].endPrimitive = len(faces)
clusters[idx].normal = self.splitNormal
clusters[idx].k = self.splitK
if self.frontSort and self.backSort:
# Note: below there are some lines dealing with the variable "noAddNormal" scattered in. Kind of a hack.
# Here is what it does: it is an optimization. Any face with a normal matching an entry in that list will
# not be added to the mesh. This is desired if we know we are on one side of a plane (then we don't want
# to bother adding faces that face the opposite direction).
# back then front -- but add in opp. order because we know where to return from self.frontSort but not self.backSort
frontSide = len(clusters)
self.frontSort.generateClusters(clusters, faces, indices, idx + 1)
clusters[idx].frontCluster = len(clusters)
noAddNormals.append(-self.splitNormal)
self.backSort.generateClusters(clusters, faces, indices, frontSide)
noAddNormals.pop(-1)
clusters[idx].backCluster = clusters[idx].frontCluster
# front then back -- but add in opp. order because we know where to return from self.backSort but not self.frontSort
backSide = len(clusters)
self.backSort.generateClusters(clusters, faces, indices, idx + 1)
clusters[idx].backCluster = len(clusters)
noAddNormals.append(self.splitNormal)
self.frontSort.generateClusters(clusters, faces, indices, backSide)
noAddNormals.pop(-1)
elif self.frontSort:
clusters[idx].frontCluster = clusters[idx].backCluster = len(clusters)
self.frontSort.generateClusters(clusters, faces, indices, idx + 1)
elif self.backSort:
clusters[idx].frontCluster = clusters[idx].backCluster = len(clusters)
self.backSort.generateClusters(clusters, faces, indices, idx + 1)
else:
self.addOrderedFaces(self.middleCluster, faces, indices, clusters[idx].startPrimitive != len(faces))
self.addFaces(self.frontClusters, faces, indices, clusters[idx].startPrimitive != len(faces))
clusters[idx].endPrimitive = len(faces)
clusters[idx].frontCluster = clusters[idx].backCluster = retIndex
if self.frontSort or self.backSort:
clusters[idx + 1].normal = Vector(0.0, 0.0, 0.0)
clusters[idx + 1].k = 0.0
clusters[idx + 1].startPrimitive = len(faces)
self.addFaces(self.frontClusters, faces, indices)
clusters[idx + 1].endPrimitive = len(faces)
clusters[idx + 1].frontCluster = clusters[idx + 1].backCluster = retIndex
else:
clusters.pop(-1)
from .Torque_Util import *
from .Dts_Mesh import Cluster, DtsMesh, Primitive
from .Dts_Stream import *
| |
import os
import time
from macpath import abspath
def factoriel(n):
    """Return n! computed recursively.

    BUG FIX: a negative n previously recursed until RecursionError; it now
    raises ValueError up front. (Name keeps the original spelling for
    backward compatibility.)
    """
    if n < 0:
        raise ValueError("factoriel is undefined for negative n")
    if n == 0:
        return 1
    return n * factoriel(n - 1)
class RulerDrawer:
    """Prints an ASCII ruler: a labeled major line per inch, with recursively
    halving tick lines between the major lines."""

    def __init__(self, inches, ticks_lenght):
        # NOTE: 'ticks_lenght' keeps the original (misspelled) parameter name
        # so keyword callers don't break.
        self.inches = inches  # number of labeled inch intervals
        self.ticks = ticks_lenght  # dash count of a major (inch) line

    def draw(self):
        """Draw the whole ruler to stdout."""
        self._draw_line(self.ticks, '0')
        for j in range(1, self.inches + 1):
            self._draw_interval(self.ticks - 1)
            self._draw_line(self.ticks, str(j))

    def _draw_line(self, ticks, inch=''):
        # One ruler line: optional inch label followed by `ticks` dashes.
        line = '-' * ticks
        if inch:
            line = inch + ' ' + line
        print(line)

    def _draw_interval(self, central_ticks):
        # Recursive binary subdivision: shorter intervals flank a central tick.
        # BUG FIX: removed leftover debug output ("hit N") that corrupted the ruler.
        if central_ticks > 0:
            self._draw_interval(central_ticks - 1)
            self._draw_line(central_ticks)
            self._draw_interval(central_ticks - 1)
def rbs(arr, target):
    '''
    Recursive binary search over a sorted list; O(log2 n).
    Returns the index of target, or -1 when absent.
    '''
    return _rbs(arr, target, 0, len(arr) - 1)

def _rbs(arr, target, lo, hi):
    if lo > hi:
        return -1
    # Midpoint written overflow-safely (matters in fixed-width-int languages).
    mid = lo + (hi - lo) // 2
    if arr[mid] == target:
        return mid
    if arr[mid] < target:
        return _rbs(arr, target, mid + 1, hi)
    return _rbs(arr, target, lo, mid - 1)
def disk_usage(path):
    '''
    Recursively total the disk usage (in bytes) of *path*: for a directory,
    its own size plus that of every descendant; for a file, just its size.
    Prints each path with its cumulative total as it unwinds.
    '''
    total = os.path.getsize(path)
    if os.path.isdir(path):
        for child in os.listdir(path):
            total += disk_usage(os.path.join(path, child))
    print('%d\t%s' % (total, path))
    return total
#bad recursion - solves problem of unique elements(distinct) in exponential time(2^n)-1
def unique3(S,start,stop):
if stop-start <= 1: return True
elif not unique3(S, start, stop-1): return False
elif not unique3(S, start+1, stop): return False
print('comapre ' + str(start) + ' ' + str(stop-1) )
return S[start] != S[stop-1]
#C-4.11
def uniqueQuadratic(S):
    '''
    Distinct-elements test in O(n^2) time, using recursion in place of the
    usual double loop: compare S[k] against S[j], sweeping j right and then
    advancing k.
    '''
    def _compare(S, k, j):
        if j >= len(S):
            return True
        if j == len(S) - 1 and k < len(S) - 2:
            if not _compare(S, k + 1, k + 2):
                return False
        if j < len(S) - 1 and not _compare(S, k, j + 1):
            return False
        return S[k] != S[j]
    return _compare(S, 0, 1)
#C-4.12
def product(m, n):
    '''
    Product of two non-negative ints via repeated addition.
    Recurses on the smaller operand, so it makes O(min(m, n)) calls.

    BUG FIX: the original base case was n == 1 only, so product(x, 0) or
    product(0, x) recursed forever; a zero base case is added.
    '''
    def _helper(big, small):
        # add `big` to itself `small` times
        if small == 0:
            return 0
        if small == 1:
            return big
        return big + _helper(big, small - 1)
    if m > n:
        return _helper(m, n)
    return _helper(n, m)
def reverse(S, start, stop):
    '''
    Reverse S[start:stop] in place by swapping the two ends and recursing
    inward; about 1 + n/2 calls for a range of length n.
    '''
    if stop - start >= 2:
        S[start], S[stop - 1] = S[stop - 1], S[start]
        reverse(S, start + 1, stop - 1)
def binary_sum(S, start, stop):
    '''
    Sum S[start:stop] by divide and conquer.
    O(log2 n) recursion depth (space), O(n) time.

    BUG FIX: the empty-range base case was the chained comparison
    `start >= stop == 0`, which is only true when stop == 0; any other
    empty range (e.g. start == stop == 2) recursed forever.
    '''
    if start >= stop:
        return 0
    if start == stop - 1:
        return S[start]
    mid = (start + stop) // 2
    return binary_sum(S, start, mid) + binary_sum(S, mid, stop)
#C-49
def min_max(S, start):
    '''
    Return a (min, max) tuple over S[start:] in O(n) time via recursion.

    BUG FIX: an empty sequence previously died with an opaque IndexError
    (S[-1]); it now raises ValueError, matching the builtin min()/max().
    '''
    def _min_max(n1, n2):
        # n2 is either the (min, max) tuple from the recursion or, at the
        # deepest level, the bare last element.
        if isinstance(n2, tuple):
            n2 = n2 + (n1,)
            return (min(n2), max(n2))
        return (min(n1, n2), max(n1, n2))
    if not S:
        raise ValueError('min_max() arg is an empty sequence')
    if start == len(S):
        return S[start - 1]
    return _min_max(S[start], min_max(S, start + 1))
def faster_sum(S, start, end):
    '''
    *** ITERATIVE *** sum of S[start..end] (inclusive bounds), adding the two
    ends and moving inward: O(1) space, ~n/2 iterations.

    BUG FIX: the adjacent-pair test was `start - end == 1`, which only becomes
    true AFTER the indices cross, so even-length ranges double-counted the
    middle pair (e.g. [1,2,3,4] summed to 15). The test is now `end - start == 1`
    and is checked before adding.
    '''
    res = 0
    while True:
        if end - start == 1:
            return res + S[start] + S[end]
        if start == end:
            return res + S[start]
        res += S[start] + S[end]
        start += 1
        end -= 1
#C-4.17
def isPalindrome(S):
    '''
    True iff S reads the same forwards and backwards.

    BUG FIXES: the original rejected ALL even-length strings up front
    ('abba' returned False), and its recursion had no base case for
    crossed pointers, so even-length inputs could never terminate cleanly.
    '''
    def _isPalindrome(S, start, end):
        if start >= end:  # met or crossed in the middle: all pairs matched
            return True
        if S[start] != S[end]:
            return False
        return _isPalindrome(S, start + 1, end - 1)
    return _isPalindrome(S, 0, len(S) - 1)
#C-4.19
def even_before_odds(S):
    '''
    Rearrange S in place so every even number precedes every odd number
    (this is a partition, not a sort).
    Example:
    input : [3,4,11,1,12,7,14,8]
    output : [8,4,14,12,1,7,11,3]

    BUG FIX: when S[k] was even and S[j] was odd (both already on the correct
    side) the original recursion simply stopped, leaving the middle of the
    list unprocessed (e.g. [2,3,4,1] came back unchanged). The missing
    advance-both branch is added.
    '''
    def _helper(S, k, j):
        if j <= k:
            return
        if S[k] % 2 == 1 and S[j] % 2 == 0:
            # both on the wrong side: swap and close in from both ends
            S[k], S[j] = S[j], S[k]
            _helper(S, k + 1, j - 1)
        elif S[k] % 2 == 1:
            _helper(S, k, j - 1)
        elif S[j] % 2 == 0:
            _helper(S, k + 1, j)
        else:
            # both already placed correctly: advance both pointers
            _helper(S, k + 1, j - 1)
    _helper(S, 0, len(S) - 1)
#P-4.27
def walk(dirr):
    '''
    Non-generator take on os.walk. Returns (dirpath, subdirs, files) where
    dirpath lists every directory visited (depth-first), and subdirs/files
    are flat lists of names (unlike os.walk, not grouped per directory).
    '''
    dirpath, subdirs, files = [], [], []

    def _visit(current):
        dirpath.append(current)
        for name in os.listdir(current):
            full = os.path.join(current, name)
            if os.path.isdir(full):
                subdirs.append(name)
                _visit(full)
            else:
                files.append(name)

    _visit(dirr)
    return dirpath, subdirs, files
#P-4.23
def listfiles(dirr, target_file_name=None):
    '''
    Generates absolute paths of files under *dirr*, recursing into
    subdirectories. With target_file_name given, yields only files whose
    name equals it; with None, yields every file.
    '''
    for entry in os.listdir(dirr):
        full = os.path.join(dirr, entry)
        if os.path.isdir(full):
            yield from listfiles(full, target_file_name)
        elif target_file_name == entry or target_file_name is None:
            yield full
def slower_power(a, b):
    '''
    Computes a**b with b multiplications (linear recursion).
    '''
    return 1 if b == 0 else a * slower_power(a, b - 1)
def faster_power(a, b):
    '''
    Computes a**b with O(log b) multiplications (repeated squaring).

    Fixed: the only base case was b == 1, so faster_power(a, 0)
    recursed forever (b//2 stays 0).  The base case is now b == 0,
    which also makes b == 1 work: half = faster_power(a, 0) = 1,
    res = 1, and the odd-exponent branch multiplies in the final a.
    '''
    if b == 0:
        return 1
    half = faster_power(a, b // 2)
    res = half * half
    if b % 2 == 1:
        res = res * a
    return res
if __name__ == '__main__':
    # Ad-hoc driver: times the two exponentiation implementations and
    # runs a couple of demos; most earlier experiments are left
    # commented out below.
    #test ruler drawer
    #rd = RulerDrawer(1,3)
    #rd.draw()
    # Time linear-recursion exponentiation...
    t = time.time()
    print(slower_power(2,560))
    print(str(time.time()-t))
    # ...against repeated-squaring exponentiation.
    t = time.time()
    print(faster_power(2, 1024))
    print(str(time.time()-t))
    #test recursive binary search
    #print(rbs([1,2,3,4], 5))
    #test reverse list
    #S = [1,2,3,4,5]
    #reverse(S, 0, 5)
    #print(S)
    #test disk_usage
    #isk_usage('/home/matija/Desktop/productImageHierarchy')
    #print(binary_sum([1,2,3,4,5], 0, 5))
    #print(faster_sum([1,2,3,4,5], 0, 4))
    #print(min_max([11,1,2,4,5,6], 0))
    #print(uniqueQuadratic([1,2,3,4,5,6,6]))
    #print(product(6, 10))
    #print(isPalindrome('racecar'))
    #dirpath, subdirs, files = walk('/home/matija/Desktop/walk_test')
    #print(dirpath)
    #print(subdirs)
    #print(files)
    #i = 0
    #for files in listfiles('/home/matija/Desktop', 'f1'):
        #if (i==25): break
        #print(files)
        #i = i+1
    #S = [4,3,5,2,7,6,10]
    #even_before_odds(S)
    #print(S)
    # 'product' is presumably defined earlier in this file -- TODO confirm.
    print(product(2, 100000000))
    for i in range(5,1,-1):
        print(i)
| |
"""
parser.http package (imdb package).
This package provides the IMDbHTTPAccessSystem class used to access
IMDb's data through the web interface.
The imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "http" or "web"
or "html" (this is the default).
Copyright 2004-2012 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import socket
import logging
from urllib import FancyURLopener, quote_plus
from codecs import lookup
from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \
imdbURL_character_main, imdbURL_company_main, \
imdbURL_keyword_main, imdbURL_find, imdbURL_top250, \
imdbURL_bottom100
from imdb.utils import analyze_title
from imdb._exceptions import IMDbDataAccessError, IMDbParserError
import searchMovieParser
import searchPersonParser
import searchCharacterParser
import searchCompanyParser
import searchKeywordParser
import movieParser
import personParser
import characterParser
import companyParser
import topBottomParser
# Logger for miscellaneous functions.
_aux_logger = logging.getLogger('imdbpy.parser.http.aux')
# True when running inside Google App Engine; retrieve_unicode() skips
# the `size` argument of read() in that environment.
IN_GAE = False
try:
    import google.appengine
    IN_GAE = True
    _aux_logger.info('IMDbPY is running in the Google App Engine environment')
except ImportError:
    # Not on App Engine: keep the default (IN_GAE = False).
    pass
class _ModuleProxy:
"""A proxy to instantiate and access parsers."""
def __init__(self, module, defaultKeys=None, oldParsers=False,
useModule=None, fallBackToNew=False):
"""Initialize a proxy for the given module; defaultKeys, if set,
muste be a dictionary of values to set for instanced objects."""
if oldParsers or fallBackToNew:
_aux_logger.warn('The old set of parsers was removed; falling ' \
'back to the new parsers.')
self.useModule = useModule
if defaultKeys is None:
defaultKeys = {}
self._defaultKeys = defaultKeys
self._module = module
def __getattr__(self, name):
"""Called only when no look-up is found."""
_sm = self._module
# Read the _OBJECTS dictionary to build the asked parser.
if name in _sm._OBJECTS:
_entry = _sm._OBJECTS[name]
# Initialize the parser.
kwds = {}
if self.useModule:
kwds = {'useModule': self.useModule}
parserClass = _entry[0][0]
obj = parserClass(**kwds)
attrsToSet = self._defaultKeys.copy()
attrsToSet.update(_entry[1] or {})
# Set attribute to the object.
for key in attrsToSet:
setattr(obj, key, attrsToSet[key])
setattr(self, name, obj)
return obj
return getattr(_sm, name)
# (major, minor) of the running interpreter; used to gate read(size=...).
PY_VERSION = sys.version_info[:2]
# The cookies for the "adult" search.
# Please don't mess with these accounts.
# Old 'IMDbPY' account.
_IMDbPY_cookie_id = 'boM2bYxz9MCsOnH9gZ0S9QHs12NWrNdApxsls1Vb5/NGrNdjcHx3dUas10UASoAjVEvhAbGagERgOpNkAPvxdbfKwaV2ikEj9SzXY1WPxABmDKQwdqzwRbM+12NSeJFGUEx3F8as10WwidLzVshDtxaPIbP13NdjVS9UZTYqgTVGrNcT9vyXU1'
_IMDbPY_cookie_uu = '3M3AXsquTU5Gur/Svik+ewflPm5Rk2ieY3BIPlLjyK3C0Dp9F8UoPgbTyKiGtZp4x1X+uAUGKD7BM2g+dVd8eqEzDErCoYvdcvGLvVLAen1y08hNQtALjVKAe+1hM8g9QbNonlG1/t4S82ieUsBbrSIQbq1yhV6tZ6ArvSbA7rgHc8n5AdReyAmDaJ5Wm/ee3VDoCnGj/LlBs2ieUZNorhHDKK5Q=='
# 'imdbpy2010' account.
_imdbpy2010_cookie_id = 'QrCdxVi+L+WgqOLrQJJgBgRRXGInphxiBPU/YXSFDyExMFzCp6YcYgSVXyEUhS/xMID8wqemHGID4DlntwZ49vemP5UXsAxiJ4D6goSmHGIgNT9hMXBaRSF2vMS3phxB0bVfQiQlP1RxdrzhB6YcRHFASyIhQVowwXCKtDSlD2YhgRvxBsCKtGemHBKH9mxSI='
_imdbpy2010_cookie_uu = 'oiEo2yoJFCA2Zbn/o7Z1LAPIwotAu6QdALv3foDb1x5F/tdrFY63XkSfty4kntS8Y8jkHSDLt3406+d+JThEilPI0mtTaOQdA/t2/iErp22jaLdeVU5ya4PIREpj7HFdpzhEHadcIAngSER50IoHDpD6Bz4Qy3b+UIhE/hBbhz5Q63ceA2hEvhPo5B0FnrL9Q8jkWjDIbA0Au3d+AOtnXoCIRL4Q28c+UOtnXpP4RL4T6OQdA+6ijUCI5B0AW2d+UOtnXpPYRL4T6OQdA8jkTUOYlC0A=='
# old 'IMDbPYweb' account.
_old_IMDbPYweb_cookie_id = 'rH1jNAkjTlNXvHolvBVBsgaPICNZbNdjVjzFwzas9JRmusdjVoqBs/Hs12NR+1WFxEoR9bGKEDUg6sNlADqXwkas12N131Rwdb+UQNGKN8PWrNdjcdqBQVLq8mbGDHP3hqzxhbD692NQi9D0JjpBtRaPIbP1zNdjUOqENQYv1ADWrNcT9vyXU1'
_old_IMDbPYweb_cookie_uu = 'su4/m8cho4c6HP+W1qgq6wchOmhnF0w+lIWvHjRUPJ6nRA9sccEafjGADJ6hQGrMd4GKqLcz2X4z5+w+M4OIKnRn7FpENH7dxDQu3bQEHyx0ZEyeRFTPHfQEX03XF+yeN1dsPpcXaqjUZAw+lGRfXRQEfz3RIX9IgVEffdBAHw2wQXyf9xdMPrQELw0QNB8dsffsqcdQemjPB0w+moLcPh0JrKrHJ9hjBzdMPpcXTH7XRwwOk='
# old 'IMDbPYweb' account values (as of 2012-12-30)
_IMDbPYweb_cookie_id = 'BCYjtpb46Go0cMHAMewWZEauhwqPL7ASCPpPVNutu6BuayHZd0U6Dk3UAqVlEM8DHLDsSr02RGQn5ff3245-R4A130NAWJ_5yqXx7X-zJey8vQM8JKdv3rTUSEJznJQlojUW1Bije-Q0FXAixs4I0sePWhd_tA41i-9AF2q3lPmaksram6ilMhN9i3IPESW1PMbk'
_IMDbPYweb_cookie_uu = 'BCYttQjEMc-NyUdFUGxThidAnBo7wwalEzj4un9uzf2XoEjtqDhNfrH7bOSuwlRkMEQ11SNyTajl-b9Q-21m4HwYu0e3jXZrjYLXLYzFkrEroCDyUREqaTwPJPSjGtFmvlaVBZEZmsWpaxe18DT5KiygKyGPZKH78Xu4im6ba-Sd31WvbXHzP8KGXPpGjhhVuv7Dcv314HCWkE832Srf9ya-Uv0FdGAmYyLbIAXuxnvpYQd6oZ8-CYkSGLIqcKWdrf5S'
# 'IMDbPY2013' account
_IMDbPY2013_cookie_id = 'BCYmoyqSm2WglmOzG-SrFWSvVpxsTZOB0qEOOqmAwCBxCbaNgKOxd0DTKzUvt7t04Pya5gV2tUrpDmYxrc1Dr54DQj2UXI7QI35__M5-HI2KrbOI3PjDz6M-_U3HG8topMfN64R24tmBixoZhMYXVaEc556lf0Z4gQNJVYRANXvwytP5v1lpfeToRlu9aVJwN4kT'
_IMDbPY2013_cookie_uu = 'BCYquDS8Y2i8R1pJxS4nB77YrhjHHXeOea2Xl9KtZvE6RZKVfMvzTGU4Vl5-yxfPbgRSiFJasyf-hhPuVvXyaHlfeBjNlbFT8hz2HzFFkQ_SxKxq05J51gi7Fv4SaAws1M-i7zmQ1TRunfJqCVIYqPwIs2NO7s4_YDH2ZoISVGLgca8OY2K58HychOZB1oRWHVeAJNhLJMrCWJBuGRLCNnQK5X9tA0dPPntr2Ussy0ouul-N1GQz-8y5vda3JJ_C6xkwmHcA6JrOdOFO_HqMWjVSXuxGEdrXC919JM9H0vooVvKeVgAEJnTh2GiVlUJUoH3c'
# Currently used account.
_cookie_id = _IMDbPY2013_cookie_id
_cookie_uu = _IMDbPY2013_cookie_uu
class _FakeURLOpener(object):
"""Fake URLOpener object, used to return empty strings instead of
errors.
"""
def __init__(self, url, headers):
self.url = url
self.headers = headers
def read(self, *args, **kwds): return ''
def close(self, *args, **kwds): pass
def info(self, *args, **kwds): return self.headers
class IMDbURLopener(FancyURLopener):
    """Fetch web pages and handle errors.

    Wraps urllib's FancyURLopener with a browser-like User-Agent, the
    account cookie used for "adult" searches, proxy helpers, default
    header management and a unicode-returning retrieval method.
    """
    _logger = logging.getLogger('imdbpy.parser.http.urlopener')
    def __init__(self, *args, **kwargs):
        # URL of the last page actually fetched (after redirects).
        self._last_url = u''
        FancyURLopener.__init__(self, *args, **kwargs)
        # Headers to add to every request.
        # XXX: IMDb's web server doesn't like urllib-based programs,
        # so lets fake to be Mozilla.
        # Wow! I'm shocked by my total lack of ethic! <g>
        # Remove any default user-agent header, whatever its casing.
        for header in ('User-Agent', 'User-agent', 'user-agent'):
            self.del_header(header)
        self.set_header('User-Agent', 'Mozilla/5.0')
        self.set_header('Accept-Language', 'en-us,en;q=0.5')
        # XXX: This class is used also to perform "Exact Primary
        # [Title|Name]" searches, and so by default the cookie is set.
        c_header = 'uu=%s; id=%s' % (_cookie_uu, _cookie_id)
        self.set_header('Cookie', c_header)
    def get_proxy(self):
        """Return the used proxy, or an empty string."""
        return self.proxies.get('http', '')
    def set_proxy(self, proxy):
        """Set the proxy.

        An empty/false value removes the proxy; otherwise the value is
        normalized to start with 'http://'."""
        if not proxy:
            if self.proxies.has_key('http'):
                del self.proxies['http']
        else:
            if not proxy.lower().startswith('http://'):
                proxy = 'http://%s' % proxy
            self.proxies['http'] = proxy
    def set_header(self, header, value, _overwrite=True):
        """Set a default header.

        With _overwrite (the default) any existing header of the same
        name is removed first."""
        if _overwrite:
            self.del_header(header)
        self.addheaders.append((header, value))
    def get_header(self, header):
        """Return the first value of a header, or None
        if not present."""
        for index in xrange(len(self.addheaders)):
            if self.addheaders[index][0] == header:
                return self.addheaders[index][1]
        return None
    def del_header(self, header):
        """Remove a default header."""
        # Only the first matching header is removed.
        for index in xrange(len(self.addheaders)):
            if self.addheaders[index][0] == header:
                del self.addheaders[index]
                break
    def retrieve_unicode(self, url, size=-1):
        """Retrieves the given URL, and returns a unicode string,
        trying to guess the encoding of the data (assuming latin_1
        by default)"""
        encode = None
        try:
            if size != -1:
                # Only fetch the first `size` bytes.
                self.set_header('Range', 'bytes=0-%d' % size)
            uopener = self.open(url)
            kwds = {}
            if PY_VERSION > (2, 3) and not IN_GAE:
                # read(size=...) is not supported on old Pythons / GAE.
                kwds['size'] = size
            content = uopener.read(**kwds)
            self._last_url = uopener.url
            # Maybe the server is so nice to tell us the charset...
            server_encode = uopener.info().getparam('charset')
            # Otherwise, look at the content-type HTML meta tag.
            if server_encode is None and content:
                begin_h = content.find('text/html; charset=')
                if begin_h != -1:
                    # 19 == len('text/html; charset=')
                    end_h = content[19+begin_h:].find('"')
                    if end_h != -1:
                        server_encode = content[19+begin_h:19+begin_h+end_h]
            if server_encode:
                try:
                    # Accept the charset only if the codec is known.
                    if lookup(server_encode):
                        encode = server_encode
                except (LookupError, ValueError, TypeError):
                    pass
            uopener.close()
            if size != -1:
                self.del_header('Range')
            self.close()
        except IOError, e:
            if size != -1:
                # Ensure that the Range header is removed.
                self.del_header('Range')
            raise IMDbDataAccessError({'errcode': e.errno,
                                        'errmsg': str(e.strerror),
                                        'url': url,
                                        'proxy': self.get_proxy(),
                                        'exception type': 'IOError',
                                        'original exception': e})
        if encode is None:
            encode = 'latin_1'
            # The detection of the encoding is error prone...
            self._logger.warn('Unable to detect the encoding of the retrieved '
                        'page [%s]; falling back to default latin1.', encode)
        ##print unicode(content, encode, 'replace').encode('utf8')
        return unicode(content, encode, 'replace')
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Handle HTTP errors: a 404 yields a fake (empty) page object,
        anything else raises IMDbDataAccessError."""
        if errcode == 404:
            self._logger.warn('404 code returned for %s: %s (headers: %s)',
                                url, errmsg, headers)
            return _FakeURLOpener(url, headers)
        raise IMDbDataAccessError({'url': 'http:%s' % url,
                                    'errcode': errcode,
                                    'errmsg': errmsg,
                                    'headers': headers,
                                    'error type': 'http_error_default',
                                    'proxy': self.get_proxy()})
    def open_unknown(self, fullurl, data=None):
        # Unknown URL scheme: always an access error.
        raise IMDbDataAccessError({'fullurl': fullurl,
                                    'data': str(data),
                                    'error type': 'open_unknown',
                                    'proxy': self.get_proxy()})
    def open_unknown_proxy(self, proxy, fullurl, data=None):
        # Unknown proxy scheme: always an access error.
        raise IMDbDataAccessError({'proxy': str(proxy),
                                    'fullurl': fullurl,
                                    'error type': 'open_unknown_proxy',
                                    'data': str(data)})
class IMDbHTTPAccessSystem(IMDbBase):
"""The class used to access IMDb's data through the web."""
accessSystem = 'http'
_http_logger = logging.getLogger('imdbpy.parser.http')
def __init__(self, isThin=0, adultSearch=1, proxy=-1, oldParsers=False,
fallBackToNew=False, useModule=None, cookie_id=-1,
timeout=30, cookie_uu=None, *arguments, **keywords):
"""Initialize the access system."""
IMDbBase.__init__(self, *arguments, **keywords)
self.urlOpener = IMDbURLopener()
# When isThin is set, we're parsing the "maindetails" page
# of a movie (instead of the "combined" page) and movie/person
# references are not collected if no defaultModFunct is provided.
#
# NOTE: httpThin was removed since IMDbPY 4.8.
self.isThin = isThin
self._getRefs = True
self._mdparse = False
if isThin:
self._http_logger.warn('"httpThin" access system no longer ' +
'supported; "http" used automatically', exc_info=False)
self.isThin = 0
if self.accessSystem in ('httpThin', 'webThin', 'htmlThin'):
self.accessSystem = 'http'
self.set_timeout(timeout)
self.do_adult_search(adultSearch)
if cookie_id != -1:
if cookie_id is None:
self.del_cookies()
elif cookie_uu is not None:
self.set_cookies(cookie_id, cookie_uu)
if proxy != -1:
self.set_proxy(proxy)
if useModule is not None:
if not isinstance(useModule, (list, tuple)) and ',' in useModule:
useModule = useModule.split(',')
_def = {'_modFunct': self._defModFunct, '_as': self.accessSystem}
# Proxy objects.
self.smProxy = _ModuleProxy(searchMovieParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.spProxy = _ModuleProxy(searchPersonParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.scProxy = _ModuleProxy(searchCharacterParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.scompProxy = _ModuleProxy(searchCompanyParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.skProxy = _ModuleProxy(searchKeywordParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.mProxy = _ModuleProxy(movieParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.pProxy = _ModuleProxy(personParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.cProxy = _ModuleProxy(characterParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.compProxy = _ModuleProxy(companyParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
self.topBottomProxy = _ModuleProxy(topBottomParser, defaultKeys=_def,
oldParsers=oldParsers, useModule=useModule,
fallBackToNew=fallBackToNew)
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
try:
return '%07d' % int(movieID)
except ValueError, e:
raise IMDbParserError('invalid movieID "%s": %s' % (movieID, e))
def _normalize_personID(self, personID):
"""Normalize the given personID."""
try:
return '%07d' % int(personID)
except ValueError, e:
raise IMDbParserError('invalid personID "%s": %s' % (personID, e))
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
try:
return '%07d' % int(characterID)
except ValueError, e:
raise IMDbParserError('invalid characterID "%s": %s' % \
(characterID, e))
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
try:
return '%07d' % int(companyID)
except ValueError, e:
raise IMDbParserError('invalid companyID "%s": %s' % \
(companyID, e))
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID; in this implementation
the movieID _is_ the imdbID.
"""
return movieID
def get_imdbPersonID(self, personID):
"""Translate a personID in an imdbID; in this implementation
the personID _is_ the imdbID.
"""
return personID
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in an imdbID; in this implementation
the characterID _is_ the imdbID.
"""
return characterID
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in an imdbID; in this implementation
the companyID _is_ the imdbID.
"""
return companyID
def get_proxy(self):
"""Return the used proxy or an empty string."""
return self.urlOpener.get_proxy()
def set_proxy(self, proxy):
"""Set the web proxy to use.
It should be a string like 'http://localhost:8080/'; if the
string is empty, no proxy will be used.
If set, the value of the environment variable HTTP_PROXY is
automatically used.
"""
self.urlOpener.set_proxy(proxy)
def set_timeout(self, timeout):
"""Set the default timeout, in seconds, of the connection."""
try:
timeout = int(timeout)
except Exception:
timeout = 0
if timeout <= 0:
timeout = None
socket.setdefaulttimeout(timeout)
def set_cookies(self, cookie_id, cookie_uu):
"""Set a cookie to access an IMDb's account."""
c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
self.urlOpener.set_header('Cookie', c_header)
def del_cookies(self):
"""Remove the used cookie."""
self.urlOpener.del_header('Cookie')
def do_adult_search(self, doAdult,
cookie_id=_cookie_id, cookie_uu=_cookie_uu):
"""If doAdult is true, 'adult' movies are included in the
search results; cookie_id and cookie_uu are optional
parameters to select a specific account (see your cookie
or cookies.txt file."""
if doAdult:
self.set_cookies(cookie_id, cookie_uu)
#c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
#self.urlOpener.set_header('Cookie', c_header)
else:
self.urlOpener.del_header('Cookie')
def _retrieve(self, url, size=-1, _noCookies=False):
"""Retrieve the given URL."""
##print url
_cookies = None
# XXX: quite obscene, but in some very limited
# cases (/ttXXXXXXX/epdate) if the cookies
# are set, a 500 error is returned.
if _noCookies:
_cookies = self.urlOpener.get_header('Cookie')
self.del_cookies()
self._http_logger.debug('fetching url %s (size: %d)', url, size)
try:
ret = self.urlOpener.retrieve_unicode(url, size=size)
finally:
if _noCookies and _cookies:
self.urlOpener.set_header('Cookie', _cookies)
return ret
def _get_search_content(self, kind, ton, results):
"""Retrieve the web page for a given search.
kind can be 'tt' (for titles), 'nm' (for names),
'char' (for characters) or 'co' (for companies).
ton is the title or the name to search.
results is the maximum number of results to be retrieved."""
if isinstance(ton, unicode):
try:
ton = ton.encode('utf-8')
except Exception, e:
try:
ton = ton.encode('iso8859-1')
except Exception, e:
pass
##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results))
params = 'q=%s&s=%s&mx=%s' % (quote_plus(ton), kind, str(results))
if kind == 'ep':
params = params.replace('s=ep&', 's=tt&ttype=ep&', 1)
cont = self._retrieve(self.urls['find'] % params)
#print 'URL:', imdbURL_find % params
if cont.find('Your search returned more than') == -1 or \
cont.find("displayed the exact matches") == -1:
return cont
# The retrieved page contains no results, because too many
# titles or names contain the string we're looking for.
params = 'q=%s&ls=%s&lm=0' % (quote_plus(ton), kind)
size = 131072 + results * 512
return self._retrieve(self.urls['find'] % params, size=size)
def _search_movie(self, title, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'tt', 'q': title})
##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})
##params = 'q=%s&tt=on&mx=%s' % (quote_plus(title), str(results))
##cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('tt', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def _search_episode(self, title, results):
t_dict = analyze_title(title)
if t_dict['kind'] == 'episode':
title = t_dict['title']
cont = self._get_search_content('ep', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
def get_movie_main(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'combined')
return self.mProxy.movie_parser.parse(cont, mdparse=self._mdparse)
def get_movie_full_credits(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'fullcredits')
return self.mProxy.movie_parser.parse(cont)
def get_movie_plot(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'plotsummary')
return self.mProxy.plot_parser.parse(cont, getRefs=self._getRefs)
def get_movie_awards(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'awards')
return self.mProxy.movie_awards_parser.parse(cont)
def get_movie_taglines(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'taglines')
return self.mProxy.taglines_parser.parse(cont)
def get_movie_keywords(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'keywords')
return self.mProxy.keywords_parser.parse(cont)
def get_movie_alternate_versions(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'alternateversions')
return self.mProxy.alternateversions_parser.parse(cont,
getRefs=self._getRefs)
def get_movie_crazy_credits(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'crazycredits')
return self.mProxy.crazycredits_parser.parse(cont,
getRefs=self._getRefs)
def get_movie_goofs(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'goofs')
return self.mProxy.goofs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_quotes(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'quotes')
return self.mProxy.quotes_parser.parse(cont, getRefs=self._getRefs)
def get_movie_release_dates(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'releaseinfo')
ret = self.mProxy.releasedates_parser.parse(cont)
ret['info sets'] = ('release dates', 'akas')
return ret
get_movie_akas = get_movie_release_dates
get_movie_release_info = get_movie_release_dates
def get_movie_vote_details(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'ratings')
return self.mProxy.ratings_parser.parse(cont)
def get_movie_official_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'officialsites')
return self.mProxy.officialsites_parser.parse(cont)
def get_movie_trivia(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'trivia')
return self.mProxy.trivia_parser.parse(cont, getRefs=self._getRefs)
def get_movie_connections(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'movieconnections')
return self.mProxy.connections_parser.parse(cont)
def get_movie_technical(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'technical')
return self.mProxy.tech_parser.parse(cont)
def get_movie_business(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'business')
return self.mProxy.business_parser.parse(cont, getRefs=self._getRefs)
def get_movie_literature(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'literature')
return self.mProxy.literature_parser.parse(cont)
def get_movie_locations(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'locations')
return self.mProxy.locations_parser.parse(cont)
def get_movie_soundtrack(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'soundtrack')
return self.mProxy.soundtrack_parser.parse(cont)
def get_movie_dvd(self, movieID):
self._http_logger.warn('dvd information no longer available', exc_info=False)
return {}
def get_movie_recommendations(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'recommendations')
return self.mProxy.rec_parser.parse(cont)
def get_movie_critic_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'criticreviews')
return self.mProxy.criticrev_parser.parse(cont)
def get_movie_external_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'externalreviews')
return self.mProxy.externalrev_parser.parse(cont)
def get_movie_newsgroup_reviews(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'newsgroupreviews')
return self.mProxy.newsgrouprev_parser.parse(cont)
def get_movie_misc_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'miscsites')
return self.mProxy.misclinks_parser.parse(cont)
def get_movie_sound_clips(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'soundsites')
return self.mProxy.soundclips_parser.parse(cont)
def get_movie_video_clips(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'videosites')
return self.mProxy.videoclips_parser.parse(cont)
def get_movie_photo_sites(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'photosites')
return self.mProxy.photosites_parser.parse(cont)
def get_movie_news(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'news')
return self.mProxy.news_parser.parse(cont, getRefs=self._getRefs)
def get_movie_amazon_reviews(self, movieID):
self._http_logger.warn('amazon review no longer available', exc_info=False)
return {}
def get_movie_guests(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'epcast')
return self.mProxy.episodes_cast_parser.parse(cont)
get_movie_episodes_cast = get_movie_guests
def get_movie_merchandising_links(self, movieID):
self._http_logger.warn('merchandising links no longer available',
exc_info=False)
return {}
def _purge_seasons_data(self, data_d):
if '_current_season' in data_d['data']:
del data_d['data']['_current_season']
if '_seasons' in data_d['data']:
del data_d['data']['_seasons']
return data_d
def get_movie_episodes(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'episodes')
data_d = self.mProxy.season_episodes_parser.parse(cont)
if not data_d and 'data' in data_d:
return {}
_current_season = data_d['data'].get('_current_season', '')
_seasons = data_d['data'].get('_seasons') or []
data_d = self._purge_seasons_data(data_d)
data_d['data'].setdefault('episodes', {})
nr_eps = len(data_d['data']['episodes'].get(_current_season) or [])
for season in _seasons:
if season == _current_season:
continue
other_cont = self._retrieve(self.urls['movie_main'] % movieID + 'episodes?season=' + str(season))
other_d = self.mProxy.season_episodes_parser.parse(other_cont)
other_d = self._purge_seasons_data(other_d)
other_d['data'].setdefault('episodes', {})
if not (other_d and other_d['data'] and other_d['data']['episodes'][season]):
continue
nr_eps += len(other_d['data']['episodes'].get(season) or [])
data_d['data']['episodes'][season] = other_d['data']['episodes'][season]
data_d['data']['number of episodes'] = nr_eps
return data_d
def get_movie_episodes_rating(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'epdate', _noCookies=True)
data_d = self.mProxy.eprating_parser.parse(cont)
# set movie['episode of'].movieID for every episode.
if data_d.get('data', {}).has_key('episodes rating'):
for item in data_d['data']['episodes rating']:
episode = item['episode']
episode['episode of'].movieID = movieID
return data_d
def get_movie_faqs(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'faq')
return self.mProxy.movie_faqs_parser.parse(cont, getRefs=self._getRefs)
def get_movie_airing(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'tvschedule')
return self.mProxy.airing_parser.parse(cont)
get_movie_tv_schedule = get_movie_airing
def get_movie_synopsis(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'synopsis')
return self.mProxy.synopsis_parser.parse(cont)
def get_movie_parents_guide(self, movieID):
cont = self._retrieve(self.urls['movie_main'] % movieID + 'parentalguide')
return self.mProxy.parentsguide_parser.parse(cont)
def _search_person(self, name, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'nm', 'q': name})
##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})
#params = 'q=%s&nm=on&mx=%s' % (quote_plus(name), str(results))
#cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('nm', name, results)
return self.spProxy.search_person_parser.parse(cont, results=results)['data']
def get_person_main(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'maindetails')
ret = self.pProxy.maindetails_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
def get_person_filmography(self, personID):
return self.get_person_main(personID)
def get_person_biography(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'bio')
return self.pProxy.bio_parser.parse(cont, getRefs=self._getRefs)
def get_person_resume(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'resume')
return self.pProxy.resume_parser.parse(cont, getRefs=self._getRefs)
def get_person_awards(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'awards')
return self.pProxy.person_awards_parser.parse(cont)
def get_person_other_works(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'otherworks')
return self.pProxy.otherworks_parser.parse(cont, getRefs=self._getRefs)
#def get_person_agent(self, personID):
# cont = self._retrieve(self.urls['person_main'] % personID + 'agent')
# return self.pProxy.agent_parser.parse(cont)
def get_person_publicity(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'publicity')
return self.pProxy.publicity_parser.parse(cont)
def get_person_official_sites(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'officialsites')
return self.pProxy.person_officialsites_parser.parse(cont)
def get_person_news(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'news')
return self.pProxy.news_parser.parse(cont)
def get_person_episodes(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmoseries')
return self.pProxy.person_series_parser.parse(cont)
def get_person_merchandising_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'forsale')
return self.pProxy.sales_parser.parse(cont)
def get_person_genres_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmogenre')
return self.pProxy.person_genres_parser.parse(cont)
def get_person_keywords_links(self, personID):
cont = self._retrieve(self.urls['person_main'] % personID + 'filmokey')
return self.pProxy.person_keywords_parser.parse(cont)
def _search_character(self, name, results):
cont = self._get_search_content('ch', name, results)
return self.scProxy.search_character_parser.parse(cont, results=results)['data']
def get_character_main(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID)
ret = self.cProxy.character_main_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
get_character_filmography = get_character_main
def get_character_biography(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID + 'bio')
return self.cProxy.character_bio_parser.parse(cont,
getRefs=self._getRefs)
def get_character_episodes(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID +
'filmoseries')
return self.cProxy.character_series_parser.parse(cont)
def get_character_quotes(self, characterID):
cont = self._retrieve(self.urls['character_main'] % characterID + 'quotes')
return self.cProxy.character_quotes_parser.parse(cont,
getRefs=self._getRefs)
def _search_company(self, name, results):
cont = self._get_search_content('co', name, results)
url = self.urlOpener._last_url
return self.scompProxy.search_company_parser.parse(cont, url=url,
results=results)['data']
def get_company_main(self, companyID):
cont = self._retrieve(self.urls['company_main'] % companyID)
ret = self.compProxy.company_main_parser.parse(cont)
return ret
def _search_keyword(self, keyword, results):
# XXX: the IMDb web server seems to have some serious problem with
# non-ascii keyword.
# E.g.: http://akas.imdb.com/keyword/fianc%E9/
# will return a 500 Internal Server Error: Redirect Recursion.
keyword = keyword.encode('utf8', 'ignore')
try:
cont = self._get_search_content('kw', keyword, results)
except IMDbDataAccessError:
self._http_logger.warn('unable to search for keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_keyword_parser.parse(cont, results=results)['data']
def _get_keyword(self, keyword, results):
keyword = keyword.encode('utf8', 'ignore')
try:
cont = self._retrieve(self.urls['keyword_main'] % keyword)
except IMDbDataAccessError:
self._http_logger.warn('unable to get keyword %s', keyword,
exc_info=True)
return []
return self.skProxy.search_moviekeyword_parser.parse(cont, results=results)['data']
def _get_top_bottom_movies(self, kind):
if kind == 'top':
parser = self.topBottomProxy.top250_parser
url = self.urls['top250']
elif kind == 'bottom':
parser = self.topBottomProxy.bottom100_parser
url = self.urls['bottom100']
else:
return []
cont = self._retrieve(url)
return parser.parse(cont)['data']
| |
from adminsortable2.admin import SortableAdminMixin
from django import forms
from django.contrib import admin
from django.contrib.admin import widgets, SimpleListFilter
from django.core.exceptions import ValidationError
from mptt.admin import DraggableMPTTAdmin
from mptt.forms import TreeNodeMultipleChoiceField
from django_admin_listfilter_dropdown.filters import DropdownFilter
from delibere.models import Firmatario, Delibera, Documento, Amministrazione, \
Settore, Normativa
class DocumentoAdminForm(forms.ModelForm):
    """ModelForm for Documento rows edited inline on a Delibera.

    Validates that the uploaded file's name starts with the delibera's
    code ``E<yy><numero:04d>`` (e.g. ``E170012`` for numero 12, anno 2017),
    where ``anno`` and ``numero`` are read from the parent form's raw
    POST data.
    """

    def clean(self):
        """Cross-field validation; returns the cleaned data dict."""
        cleaned_data = super(DocumentoAdminForm, self).clean()
        if 'file' in self.cleaned_data and self.cleaned_data['file']:
            # Only the basename matters; the storage path may vary.
            filename = self.cleaned_data['file'].name.split('/')[-1]
            try:
                codice = "E{0}{1:04d}".format(
                    self.data['anno'][2:], int(self.data['numero'])
                )
            except (KeyError, TypeError, ValueError):
                # Parent's anno/numero are missing or not numeric: their own
                # field validation reports the problem; don't crash here
                # with an unhandled exception (500 error).
                return cleaned_data
            if not filename.startswith(codice):
                raise ValidationError(
                    {'file': "Il nome deve iniziare "
                             "con il codice {0}".format(codice)}
                )
        return cleaned_data
class DocumentoInline(admin.TabularInline):
    """Inline editor for the Documento rows attached to a Delibera."""
    model = Documento
    form = DocumentoAdminForm
    extra = 0
    can_delete = True
    show_change_link = True
    # Derived from the uploaded file; never edited by hand.
    readonly_fields = ('nome', 'estensione',)
class AnnoDropdownFilter(DropdownFilter):
    """DropdownFilter variant listing the years in descending order.

    TODO: It's a hack, a cleaner way should be implemented in the original
    package
    """

    def __init__(self, field, request, params, model, model_admin, field_path):
        super(AnnoDropdownFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        # Rebuild the dropdown choices, newest year first.
        qs = model_admin.get_queryset(request)
        self.lookup_choices = (
            qs.distinct()
              .order_by('-' + field.name)
              .values_list(field.name, flat=True)
        )
class DataSedutaFilter(SimpleListFilter):
    # Human-readable title which will be displayed in the
    # right admin sidebar just above the filter options.
    title = 'Data della seduta'

    # Parameter for the filter that will be used in the URL query.
    parameter_name = 'data'

    def lookups(self, request, model_admin):
        """
        Returns a list of tuples. The first element in each
        tuple is the coded value for the option that will
        appear in the URL query. The second element is the
        human-readable name for the option that will appear
        in the right sidebar.
        """
        # Offer only the seduta dates belonging to the currently selected
        # year (the 'anno' query parameter).
        # NOTE(review): when 'anno' is absent this becomes filter(anno=None)
        # and yields no choices - confirm that is the intended behaviour.
        anno = request.GET.get('anno', None)
        items = []
        for item in model_admin.model.objects\
                .filter(anno=anno)\
                .order_by('-data')\
                .values_list('data', flat=True)\
                .distinct():
            items.append((item, item.strftime("%d/%m/%Y")))
        return items

    def queryset(self, request, queryset):
        """
        Returns the filtered queryset based on the value
        provided in the query string and retrievable via
        `self.value()`.
        """
        # Filter by the selected seduta date, if any; otherwise leave
        # the queryset untouched.
        if self.value():
            return queryset.filter(data=self.value())
        else:
            return queryset
class DeliberaAdmin(admin.ModelAdmin):
    """Admin for Delibera: filtered, searchable list with inline documents."""

    list_display = ('anno', 'data', 'numero', 'descrizione',
        'pubblicata', 'cc', 'gu')
    list_display_links = ('descrizione',)
    # Year dropdown (newest first) plus the dependent seduta-date filter.
    list_filter = (
        ('anno', AnnoDropdownFilter),
        DataSedutaFilter,
    )
    fieldsets = (
        ('Delibera', {
            'fields': ('id', 'codice', 'descrizione', 'pubblicata',
                'data', 'anno', 'numero',
                'firmatario',
                'note'
            )
        }),
        ('Corte dei conti', {
            'fields': ('cc_data', 'cc_registro', 'cc_foglio'),
        }),
        ('Gazzetta Ufficiale', {
            'fields': ('gu_data', 'gu_numero', 'gu_tipologia',
                'gu_data_rettifica', 'gu_numero_rettifica'),
        }),
        ('Categorizzazione', {
            'fields': (
                'amministrazioni', 'settori',
            ),
        }),
    )
    readonly_fields = ('id', 'slug', 'created_at', 'updated_at', )
    filter_horizontal = ('amministrazioni', 'settori', )
    search_fields = ('numero', 'anno', 'descrizione')
    ordering = ('-anno', '-numero_ord')
    inlines = [DocumentoInline,]
    save_on_top = True

    def cc(self, obj):
        # Boolean column: registered at the Corte dei conti?
        return obj.cc_data is not None
    cc.boolean = True

    def gu(self, obj):
        # Boolean column: published on the Gazzetta Ufficiale?
        return obj.gu_data is not None
    gu.boolean = True

    def get_form(self, request, obj=None, **kwargs):
        """Customize widgets: textareas, tree-aware settori selector."""
        form = super(DeliberaAdmin, self).get_form(request, obj, **kwargs)
        form.base_fields['descrizione'].widget = forms.Textarea(
            attrs={'rows':'5', 'cols': '80'}
        )
        form.base_fields['note'].widget = forms.Textarea(
            attrs={'rows':'5', 'cols': '80'}
        )
        # Replace the plain M2M field so the MPTT hierarchy is visible,
        # then swap in the filtered multi-select widget.
        form.base_fields['settori'] = TreeNodeMultipleChoiceField(
            required=False,
            queryset=Settore.objects.all()
        )
        form.base_fields['settori'].widget = widgets.FilteredSelectMultiple(
            'Settori',
            False
        )
        form.base_fields['cc_registro'].widget.attrs.update({
            "placeholder": "1",
        })
        return form
class FirmatarioAdmin(admin.ModelAdmin):
    """Admin for Firmatario: searchable list of signatories."""
    search_fields = ('nominativo',)
    list_display = ('nominativo',)
    # Primary key is displayed but never editable.
    readonly_fields = ('id', )
class AmministrazioneAdmin(SortableAdminMixin, admin.ModelAdmin):
    """Drag-sortable admin for Amministrazione."""
    search_fields = ('denominazione',)
    list_display = ('codice', 'denominazione',)
    # Change page is reached through the denomination column.
    list_display_links = ('denominazione',)
    readonly_fields = ('id', )
class SettoreAdmin(DraggableMPTTAdmin):
    """Admin for the Settore tree, with drag & drop reordering."""
    list_display = ('tree_actions', 'indented_title')
    readonly_fields = ('id', 'ss_id', 'sss_id' )
    search_fields = ('descrizione',)

    def get_form(self, request, obj=None, **kwargs):
        """Widen the description input."""
        base_form = super(SettoreAdmin, self).get_form(request, obj, **kwargs)
        descrizione = base_form.base_fields['descrizione']
        descrizione.widget.attrs['style'] = 'width: 45em;'
        return base_form
class NormativaAdmin(admin.ModelAdmin):
    """Admin for Normativa: searchable list of norms."""
    search_fields = ('descrizione',)
    list_display = ('descrizione',)
    readonly_fields = ('id', )
# Register every delibere model with its custom admin class.
admin.site.register(Settore, SettoreAdmin)
admin.site.register(Normativa, NormativaAdmin)
admin.site.register(Amministrazione, AmministrazioneAdmin)
admin.site.register(Firmatario, FirmatarioAdmin)
admin.site.register(Delibera, DeliberaAdmin)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Azure auth method module."""
import logging
from hvac import exceptions, utils
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.azure import VALID_ENVIRONMENTS
DEFAULT_MOUNT_POINT = "azure"
logger = logging.getLogger(__name__)
class Azure(VaultApiBase):
    """Azure Auth Method (API).

    Thin wrapper around the Vault HTTP API; every method builds the
    endpoint path for the given mount point and delegates to the adapter.

    Reference: https://www.vaultproject.io/api/auth/azure/index.html
    """

    def configure(
        self,
        tenant_id,
        resource,
        environment=None,
        client_id=None,
        client_secret=None,
        mount_point=DEFAULT_MOUNT_POINT,
    ):
        """Configure the credentials required for the plugin to perform API calls to Azure.

        These credentials will be used to query the metadata about the virtual machine.

        Supported methods:
            POST: /auth/{mount_point}/config. Produces: 204 (empty body)

        :param tenant_id: The tenant id for the Azure Active Directory organization.
        :type tenant_id: str | unicode
        :param resource: The configured URL for the application registered in Azure Active Directory.
        :type resource: str | unicode
        :param environment: The Azure cloud environment. Valid values: AzurePublicCloud, AzureUSGovernmentCloud,
            AzureChinaCloud, AzureGermanCloud.
        :type environment: str | unicode
        :param client_id: The client id for credentials to query the Azure APIs. Currently read permissions to query
            compute resources are required.
        :type client_id: str | unicode
        :param client_secret: The client secret for credentials to query the Azure APIs.
        :type client_secret: str | unicode
        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        # Fail fast on an unsupported cloud environment name.
        if environment is not None and environment not in VALID_ENVIRONMENTS:
            error_msg = 'invalid environment argument provided: "{arg}"; supported environments: "{environments}"'
            raise exceptions.ParamValidationError(
                error_msg.format(
                    arg=environment,
                    environments=",".join(VALID_ENVIRONMENTS),
                )
            )
        params = {
            "tenant_id": tenant_id,
            "resource": resource,
        }
        # Optional settings are omitted from the payload when left as None.
        params.update(
            utils.remove_nones(
                {
                    "environment": environment,
                    "client_id": client_id,
                    "client_secret": client_secret,
                }
            )
        )
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/config", mount_point=mount_point
        )
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def read_config(self, mount_point=DEFAULT_MOUNT_POINT):
        """Return the previously configured config, including credentials.

        Supported methods:
            GET: /auth/{mount_point}/config. Produces: 200 application/json

        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The "data" key from the JSON response of the request.
        :rtype: dict
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/config", mount_point=mount_point
        )
        response = self._adapter.get(
            url=api_path,
        )
        return response.get("data")

    def delete_config(self, mount_point=DEFAULT_MOUNT_POINT):
        """Delete the previously configured Azure config and credentials.

        Supported methods:
            DELETE: /auth/{mount_point}/config. Produces: 204 (empty body)

        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/config", mount_point=mount_point
        )
        return self._adapter.delete(
            url=api_path,
        )

    def create_role(
        self,
        name,
        policies=None,
        ttl=None,
        max_ttl=None,
        period=None,
        bound_service_principal_ids=None,
        bound_group_ids=None,
        bound_locations=None,
        bound_subscription_ids=None,
        bound_resource_groups=None,
        bound_scale_sets=None,
        num_uses=None,
        mount_point=DEFAULT_MOUNT_POINT,
    ):
        """Create a role in the method.

        Role types have specific entities that can perform login operations against this endpoint. Constraints specific
        to the role type must be set on the role. These are applied to the authenticated entities attempting to login.

        Supported methods:
            POST: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)

        :param name: Name of the role.
        :type name: str | unicode
        :param policies: Policies to be set on tokens issued using this role.
        :type policies: str | list
        :param num_uses: Number of uses to set on a token produced by this role.
        :type num_uses: int
        :param ttl: The TTL period of tokens issued using this role in seconds.
        :type ttl: str | unicode
        :param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role.
        :type max_ttl: str | unicode
        :param period: If set, indicates that the token generated using this role should never expire. The token should
            be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the
            value of this parameter.
        :type period: str | unicode
        :param bound_service_principal_ids: The list of Service Principal IDs that login is restricted to.
        :type bound_service_principal_ids: list
        :param bound_group_ids: The list of group ids that login is restricted to.
        :type bound_group_ids: list
        :param bound_locations: The list of locations that login is restricted to.
        :type bound_locations: list
        :param bound_subscription_ids: The list of subscription IDs that login is restricted to.
        :type bound_subscription_ids: list
        :param bound_resource_groups: The list of resource groups that login is restricted to.
        :type bound_resource_groups: list
        :param bound_scale_sets: The list of scale set names that the login is restricted to.
        :type bound_scale_sets: list
        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        # Validate that policies is a str or a homogeneous list of str.
        if policies is not None:
            if not (
                isinstance(policies, str)
                or (
                    isinstance(policies, list)
                    and all(isinstance(p, str) for p in policies)
                )
            ):
                error_msg = 'unsupported policies argument provided "{arg}" ({arg_type}), required type: str or List[str]"'
                raise exceptions.ParamValidationError(
                    error_msg.format(
                        arg=policies,
                        arg_type=type(policies),
                    )
                )
        # Unset options are omitted from the request payload.
        params = utils.remove_nones(
            {
                "policies": policies,
                "ttl": ttl,
                "max_ttl": max_ttl,
                "period": period,
                "bound_service_principal_ids": bound_service_principal_ids,
                "bound_group_ids": bound_group_ids,
                "bound_locations": bound_locations,
                "bound_subscription_ids": bound_subscription_ids,
                "bound_resource_groups": bound_resource_groups,
                "bound_scale_sets": bound_scale_sets,
                "num_uses": num_uses,
            }
        )
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/role/{name}", mount_point=mount_point, name=name
        )
        return self._adapter.post(
            url=api_path,
            json=params,
        )

    def read_role(self, name, mount_point=DEFAULT_MOUNT_POINT):
        """Read the previously registered role configuration.

        Supported methods:
            GET: /auth/{mount_point}/role/{name}. Produces: 200 application/json

        :param name: Name of the role.
        :type name: str | unicode
        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The "data" key from the JSON response of the request.
        :rtype: dict
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/role/{name}",
            mount_point=mount_point,
            name=name,
        )
        response = self._adapter.get(
            url=api_path,
        )
        return response.get("data")

    def list_roles(self, mount_point=DEFAULT_MOUNT_POINT):
        """List all the roles that are registered with the plugin.

        Supported methods:
            LIST: /auth/{mount_point}/role. Produces: 200 application/json

        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The "data" key from the JSON response of the request.
        :rtype: dict
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/role", mount_point=mount_point
        )
        response = self._adapter.list(url=api_path)
        return response.get("data")

    def delete_role(self, name, mount_point=DEFAULT_MOUNT_POINT):
        """Delete the previously registered role.

        Supported methods:
            DELETE: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)

        :param name: Name of the role.
        :type name: str | unicode
        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The response of the request.
        :rtype: requests.Response
        """
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/role/{name}",
            mount_point=mount_point,
            name=name,
        )
        return self._adapter.delete(
            url=api_path,
        )

    def login(
        self,
        role,
        jwt,
        subscription_id=None,
        resource_group_name=None,
        vm_name=None,
        vmss_name=None,
        use_token=True,
        mount_point=DEFAULT_MOUNT_POINT,
    ):
        """Fetch a token.

        This endpoint takes a signed JSON Web Token (JWT) and a role name for some entity. It verifies the JWT signature
        to authenticate that entity and then authorizes the entity for the given role.

        Supported methods:
            POST: /auth/{mount_point}/login. Produces: 200 application/json

        :param role: Name of the role against which the login is being attempted.
        :type role: str | unicode
        :param jwt: Signed JSON Web Token (JWT) from Azure MSI.
        :type jwt: str | unicode
        :param subscription_id: The subscription ID for the machine that generated the MSI token. This information can
            be obtained through instance metadata.
        :type subscription_id: str | unicode
        :param resource_group_name: The resource group for the machine that generated the MSI token. This information
            can be obtained through instance metadata.
        :type resource_group_name: str | unicode
        :param vm_name: The virtual machine name for the machine that generated the MSI token. This information can be
            obtained through instance metadata. If vmss_name is provided, this value is ignored.
        :type vm_name: str | unicode
        :param vmss_name: The virtual machine scale set name for the machine that generated the MSI token. This
            information can be obtained through instance metadata.
        :type vmss_name: str | unicode
        :param use_token: if True, uses the token in the response received from the auth request to set the "token"
            attribute on the :py:meth:`hvac.adapters.Adapter` instance under the _adapter Client attribute.
        :type use_token: bool
        :param mount_point: The "path" the azure auth method was mounted on.
        :type mount_point: str | unicode
        :return: The JSON response of the request.
        :rtype: dict
        """
        params = {
            "role": role,
            "jwt": jwt,
        }
        # Optional instance-metadata hints are omitted when None.
        params.update(
            utils.remove_nones(
                {
                    "subscription_id": subscription_id,
                    "resource_group_name": resource_group_name,
                    "vm_name": vm_name,
                    "vmss_name": vmss_name,
                }
            )
        )
        api_path = utils.format_url(
            "/v1/auth/{mount_point}/login", mount_point=mount_point
        )
        return self._adapter.login(
            url=api_path,
            use_token=use_token,
            json=params,
        )
| |
from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
    """
    A CaptioningRNN produces captions from image features using a recurrent
    neural network.

    The RNN receives input vectors of size D, has a vocab size of V, works on
    sequences of length T, has an RNN hidden dimension of H, uses word vectors
    of dimension W, and operates on minibatches of size N.

    Note that we don't use any regularization for the CaptioningRNN.
    """

    def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
                 hidden_dim=128, cell_type='rnn', dtype=np.float32):
        """
        Construct a new CaptioningRNN instance.

        Inputs:
        - word_to_idx: A dictionary giving the vocabulary. It contains V entries,
          and maps each string to a unique integer in the range [0, V).
        - input_dim: Dimension D of input image feature vectors.
        - wordvec_dim: Dimension W of word vectors.
        - hidden_dim: Dimension H for the hidden state of the RNN.
        - cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
        - dtype: numpy datatype to use; use float32 for training and float64 for
          numeric gradient checking.
        """
        if cell_type not in {'rnn', 'lstm'}:
            raise ValueError('Invalid cell_type "%s"' % cell_type)

        self.cell_type = cell_type
        self.dtype = dtype
        self.word_to_idx = word_to_idx
        self.idx_to_word = {i: w for w, i in word_to_idx.items()}
        self.params = {}

        vocab_size = len(word_to_idx)

        # Indices of the special tokens; <START>/<END> may be absent.
        self._null = word_to_idx['<NULL>']
        self._start = word_to_idx.get('<START>', None)
        self._end = word_to_idx.get('<END>', None)

        # Initialize word vectors
        self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
        self.params['W_embed'] /= 100

        # Initialize CNN -> hidden state projection parameters
        self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
        self.params['W_proj'] /= np.sqrt(input_dim)
        self.params['b_proj'] = np.zeros(hidden_dim)

        # Initialize parameters for the RNN.
        # An LSTM needs 4x the hidden dimension (input/forget/output/gate).
        dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
        self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
        self.params['Wx'] /= np.sqrt(wordvec_dim)
        self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
        self.params['Wh'] /= np.sqrt(hidden_dim)
        self.params['b'] = np.zeros(dim_mul * hidden_dim)

        # Initialize output to vocab weights
        self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
        self.params['W_vocab'] /= np.sqrt(hidden_dim)
        self.params['b_vocab'] = np.zeros(vocab_size)

        # Cast parameters to correct dtype
        for k, v in self.params.items():
            self.params[k] = v.astype(self.dtype)

    def loss(self, features, captions):
        """
        Compute training-time loss for the RNN. We input image features and
        ground-truth captions for those images, and use an RNN (or LSTM) to compute
        loss and gradients on all parameters.

        Inputs:
        - features: Input image features, of shape (N, D)
        - captions: Ground-truth captions; an integer array of shape (N, T) where
          each element is in the range 0 <= y[i, t] < V

        Returns a tuple of:
        - loss: Scalar loss
        - grads: Dictionary of gradients parallel to self.params
        """
        # Cut captions into two pieces: captions_in has everything but the last word
        # and will be input to the RNN; captions_out has everything but the first
        # word and this is what we will expect the RNN to generate. These are offset
        # by one relative to each other because the RNN should produce word (t+1)
        # after receiving word t. The first element of captions_in will be the START
        # token, and the first element of captions_out will be the first word.
        captions_in = captions[:, :-1]
        captions_out = captions[:, 1:]

        # You'll need this: <NULL> positions do not contribute to the loss.
        mask = (captions_out != self._null)

        # Weight and bias for the affine transform from image features to initial
        # hidden state
        W_proj, b_proj = self.params['W_proj'], self.params['b_proj']

        # Word embedding matrix
        W_embed = self.params['W_embed']

        # Input-to-hidden, hidden-to-hidden, and biases for the RNN
        Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']

        # Weight and bias for the hidden-to-vocab transformation.
        W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']

        loss, grads = 0.0, {}
        ############################################################################
        # TODO: Implement the forward and backward passes for the CaptioningRNN.   #
        # In the forward pass you will need to do the following:                   #
        # (1) Use an affine transformation to compute the initial hidden state     #
        #     from the image features. This should produce an array of shape (N, H)#
        # (2) Use a word embedding layer to transform the words in captions_in     #
        #     from indices to vectors, giving an array of shape (N, T, W).         #
        # (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to    #
        #     process the sequence of input word vectors and produce hidden state  #
        #     vectors for all timesteps, producing an array of shape (N, T, H).    #
        # (4) Use a (temporal) affine transformation to compute scores over the    #
        #     vocabulary at every timestep using the hidden states, giving an      #
        #     array of shape (N, T, V).                                            #
        # (5) Use (temporal) softmax to compute loss using captions_out, ignoring  #
        #     the points where the output word is <NULL> using the mask above.     #
        #                                                                          #
        # In the backward pass you will need to compute the gradient of the loss   #
        # with respect to all model parameters. Use the loss and grads variables   #
        # defined above to store loss and gradients; grads[k] should give the      #
        # gradients for self.params[k].                                            #
        ############################################################################
        # (1) initial hidden state, shape (N, H)
        h0, h0_cache = affine_forward(features,W_proj,b_proj)
        # (2) caption_in (indices to vectors), shape (N, T, W)
        caption_in_vector, caption_in_vector_cache = word_embedding_forward(captions_in,W_embed)
        # (3) RNN / LSTM to generate hidden state, shape (N, T, H)
        h, h_cache = None, None
        if self.cell_type == 'rnn':
            h, h_cache = rnn_forward(caption_in_vector, h0, Wx, Wh, b)
        else:
            h, h_cache = lstm_forward(caption_in_vector, h0, Wx, Wh, b) # LSTM
        # (4) affine transformation to vocab scores, shape (N, T, V)
        vocab_out, vocab_out_cache = temporal_affine_forward(h, W_vocab, b_vocab)
        # (5) (temporal) softmax; mask skips <NULL> targets
        loss, dx = temporal_softmax_loss(vocab_out, captions_out, mask)

        # Backward pass: reverse order of the forward steps.
        dx, dw, db = temporal_affine_backward(dx, vocab_out_cache)
        grads["W_vocab"] = dw
        grads["b_vocab"] = db
        # Placeholder unpacking before the cell-type-specific backward step.
        dx, dh0, dWx, dWh, db = dx, None, None, None, None
        if self.cell_type == 'rnn':
            dx, dh0, dWx, dWh, db = rnn_backward(dx,h_cache)
        else:
            dx, dh0, dWx, dWh, db = lstm_backward(dx,h_cache) # LSTM
        grads["Wx"] = dWx
        grads["Wh"] = dWh
        grads["b"] = db
        grads["W_embed"] = word_embedding_backward(dx,caption_in_vector_cache)
        # Gradient w.r.t. features is discarded; only the projection
        # parameters are trained here.
        _, dw, db = affine_backward(dh0,h0_cache)
        grads["W_proj"] = dw
        grads["b_proj"] = db
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        return loss, grads

    def sample(self, features, max_length=30):
        """
        Run a test-time forward pass for the model, sampling captions for input
        feature vectors.

        At each timestep, we embed the current word, pass it and the previous hidden
        state to the RNN to get the next hidden state, use the hidden state to get
        scores for all vocab words, and choose the word with the highest score as
        the next word. The initial hidden state is computed by applying an affine
        transform to the input image features, and the initial word is the <START>
        token.

        For LSTMs you will also have to keep track of the cell state; in that case
        the initial cell state should be zero.

        Inputs:
        - features: Array of input image features of shape (N, D).
        - max_length: Maximum length T of generated captions.

        Returns:
        - captions: Array of shape (N, max_length) giving sampled captions,
          where each element is an integer in the range [0, V). The first element
          of captions should be the first sampled word, not the <START> token.
        """
        N = features.shape[0]
        # Pre-fill with <NULL>; slots are overwritten as words are sampled.
        captions = self._null * np.ones((N, max_length), dtype=np.int32)

        # Unpack parameters
        W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
        W_embed = self.params['W_embed']
        Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
        W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']

        ###########################################################################
        # TODO: Implement test-time sampling for the model. You will need to      #
        # initialize the hidden state of the RNN by applying the learned affine   #
        # transform to the input image features. The first word that you feed to  #
        # the RNN should be the <START> token; its value is stored in the         #
        # variable self._start. At each timestep you will need to do to:          #
        # (1) Embed the previous word using the learned word embeddings           #
        # (2) Make an RNN step using the previous hidden state and the embedded   #
        #     current word to get the next hidden state.                          #
        # (3) Apply the learned affine transformation to the next hidden state to #
        #     get scores for all words in the vocabulary                          #
        # (4) Select the word with the highest score as the next word, writing it #
        #     to the appropriate slot in the captions variable                    #
        #                                                                         #
        # For simplicity, you do not need to stop generating after an <END> token #
        # is sampled, but you can if you want to.                                 #
        #                                                                         #
        # HINT: You will not be able to use the rnn_forward or lstm_forward       #
        # functions; you'll need to call rnn_step_forward or lstm_step_forward in #
        # a loop.                                                                 #
        ###########################################################################
        # Initial hidden state from image features; first input is <START>.
        prev_h, _ = affine_forward(features,W_proj,b_proj)
        start_word = self._start * np.ones((N, 1), dtype=np.int32)
        x, _ = word_embedding_forward(start_word, W_embed) # N x T x D , T=1
        x = x.reshape((N,-1)) # N x D
        prev_c = np.zeros_like(prev_h)# only for LSTM
        for i in range(max_length):
            if self.cell_type == 'rnn':
                next_h, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)
            else:
                next_h, prev_c, _ = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
            words, _ = affine_forward(next_h, W_vocab, b_vocab)
            # Greedy decoding: take the argmax word at every step.
            x = np.argmax(words, axis=1) # Most probable words
            captions[:,i] = x
            x = x.reshape((N,1))
            x, _ = word_embedding_forward(x, W_embed) # N x T x D , T=1
            x = x.reshape((N,-1)) # N x D
            prev_h = next_h
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        return captions
| |
import sys
class _EventForwarder(object):
    """Callable adapter that prepends a fixed event name to every call.

    Used by EventHook.forward() so a generic forwarder callable can tell
    which event fired.
    """
    def __init__(self, eventName, forwarder):
        self.eventName = eventName
        self.forwarder = forwarder
    def __call__(self, *args, **kwargs):
        # Delegate, inserting the event name as first positional argument.
        self.forwarder(self.eventName, *args, **kwargs)
class EventHook(object):
    """A 'event' Transmitter

    You can hook into it with hook += myReceiver (callable)
    then hook.fire() will call myReceiver()
    (or hook += myobj.onFoo => hook.fire(bar) will call myobj.onFoo(bar))

    Normal usage would be:
    class DB(object):
        def __init__(self):
            self.recordCreated = EventHook()
        def create(self, entry):
            //...code
            self.recordCreated.fire(entry)
    class DebugPrinter(object):
        def printCreatedEntry(self, entry):
    db = DB()
    dp = DebugPrinter()
    db.recordCreated += dp.printCreatedEntry
    """
    def __init__(self):
        self.__receivers = []
        # When True, fire() is a no-op (temporarily mutes the hook).
        self.fireBlocked = False
        # True once fire() has run at least once (even with no receivers).
        self.wasFired = False
    def __iadd__(self, handler):
        """Adds a receiver to this EventHook

        args:
            handler A callable which will be called on fire
        :returns: EventHook
        """
        self.__receivers.append(handler)
        return self
    def __isub__(self, handler):
        """Removes a receiver from this EventHook

        args:
            handler The callable which was previous assigned
        :returns: EventHook
        """
        self.__receivers.remove(handler)
        return self
    def fire(self, *args, **keywargs):
        """Fires a 'event'. Not really, it calls every assigned callable

        If some callable returns true, it will stop Propagation
        :returns: void
        """
        if self.fireBlocked:
            return
        self.wasFired = True
        for handler in self.__receivers:
            result = handler(*args, **keywargs)
            if result:
                # A truthy return value stops propagation.
                return result
    def forward(self, eventName, forwarder):
        """Registers *forwarder* to be called with *eventName* prepended."""
        self.__iadd__(_EventForwarder(eventName, forwarder))
    def __call__(self, *args, **keywargs):
        """Alias for fire(). The main purpose of this method is to allow
        chaining of events. So like
            instance.hook += my_callable
        you can write
            instance.hook += my_object.hook
        Than the event of instance will be fired if my_object.hook is fired

        :rtype: void
        """
        return self.fire(*args, **keywargs)
    def clearOfType(self, receiverObj):
        """Removes all receivers bound to the object *receiverObj*

        BUGFIX: bound methods expose their instance as ``__self__`` on
        Python 3 (``im_self`` was Python-2-only); the previous code raised
        AttributeError on Python 3 and on plain-function receivers.
        Plain functions (no bound instance) are left untouched.

        :returns EventHook
        """
        deleteLater = set()
        for knownReceiver in self.__receivers:
            owner = getattr(knownReceiver, '__self__',
                            getattr(knownReceiver, 'im_self', None))
            if owner is not None and owner == receiverObj:
                deleteLater.add(knownReceiver)
        for knownReceiver in deleteLater:
            self -= knownReceiver
        return self
    def clear(self):
        """Clears all receivers

        :returns: EventHook
        """
        self.__receivers = []
        return self
    def __len__(self):
        return len(self.__receivers)
    def __iter__(self):
        return iter(self.__receivers)
class EventProperty(object):
    """Data descriptor that fires an event hook whenever its value changes.

    The value is stored in the owning instance's __dict__ under the
    property's attribute name (given explicitly or discovered lazily).
    """
    def __init__(self, name=None, default=None, eventHook=None):
        self.__name = name
        if eventHook is None:
            eventHook = EventHook()
        self._eventHook = eventHook
        self._listeners = {}
        self._listenerInstalled = False
        self._default = default
    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        key = self._name(instance)
        return instance.__dict__.get(key, self._default)
    def __set__(self, instance, value):
        key = self._name(instance)
        # Suppress the event when the value does not actually change.
        if key in instance.__dict__ and instance.__dict__[key] == value:
            return
        instance.__dict__[key] = value
        self._eventHook.fire(value, instance)
    def __delete__(self, instance):
        del instance.__dict__[self._name(instance)]
    def _name(self, instance):
        """Return the attribute name, discovering it on first use."""
        if self.__name:
            return self.__name
        cls = instance.__class__
        for attr in cls.__dict__:
            if cls.__dict__[attr] is self:
                self.__name = attr
                return attr
    def __iadd__(self, handler):
        """Adds a receiver to the underlying event hook

        args:
            handler A callable which will be called on fire
        :returns: EventHook
        """
        self._eventHook.__iadd__(handler)
        return self
    def __isub__(self, handler):
        """Removes a receiver from the underlying event hook

        args:
            handler The callable which was previous assigned
        :returns: EventHook
        """
        self._eventHook.__isub__(handler)
        return self
    def forward(self, eventName, forwarder):
        return self._eventHook.forward(eventName, forwarder)
    def listenOn(self, instance, listener):
        """Attach *listener* to value changes on *instance* only."""
        if instance not in self._listeners:
            self._listeners[instance] = []
            self._installListener()
        self._listeners[instance].append(listener)
    def _installListener(self):
        # Hook our dispatcher into the event hook exactly once.
        if self._listenerInstalled:
            return
        self._listenerInstalled = True
        self._eventHook += self._callListeners
    def _callListeners(self, value, instance):
        # NOTE: the try also swallows a KeyError raised by a listener
        # itself - kept as-is to preserve the original behaviour.
        try:
            for listener in self._listeners[instance]:
                listener(value)
        except KeyError:
            return
class EventHookProperty(object):
    """Descriptor that hands out one lazily-created EventHook per instance.

    NOTE(review): instances are kept as dict keys for the descriptor's
    lifetime, so they are never garbage collected while the class lives.
    """
    def __init__(self):
        self._hooksByInstance = {}
    def __get__(self, instance, owner):
        if instance is None:
            return self
        try:
            return self._hooksByInstance[instance]
        except KeyError:
            hook = EventHook()
            self._hooksByInstance[instance] = hook
            return hook
class TestListener(object):
    """Callable that records each invocation, for use in tests."""
    def __init__(self, printOnCalls=False):
        # Last positional arguments received ([] until first call).
        self.params = []
        self.callCount = 0
        self.printOnCalls = printOnCalls
    def __call__(self, *args):
        self.callCount += 1
        self.params = args
        if self.printOnCalls:
            message = "TestListener.called: count:{} params:{}".format(
                self.callCount, self.params)
            sys.stdout.write(message)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
if batch_size is None:
batch_size = x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes is not None and n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
  """Filter data types into acceptable format."""
  # Convert dask collections first, then pandas; each extractor leaves
  # inputs it does not recognize unchanged.
  for available, extract_data, extract_labels in (
      (HAS_DASK, extract_dask_data, extract_dask_labels),
      (HAS_PANDAS, extract_pandas_data, extract_pandas_labels)):
    if available:
      x = extract_data(x)
      if y is not None:
        y = extract_labels(y)
  return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(
    x, y, n_classes, batch_size=None, shuffle=True, epochs=None):
  """Create data feeder, to sample inputs from dataset.
  If `x` and `y` are iterators, use `StreamingDataFeeder`.
  Args:
    x: numpy, pandas or Dask matrix or iterable.
    y: numpy, pandas or Dask array or iterable.
    n_classes: number of classes.
    batch_size: size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.
  Returns:
    DataFeeder object that returns training data.
  Raises:
    ValueError: if one of `x` and `y` is iterable and the other is not.
  """
  x, y = _data_type_filter(x, y)
  data_feeder_cls = DataFeeder
  if HAS_DASK:
    # pylint: disable=g-import-not-at-top
    import dask.dataframe as dd
    # Only use the dask feeder when *both* x and (if given) y are dask.
    x_is_dask = isinstance(x, (dd.Series, dd.DataFrame))
    y_is_dask = y is None or isinstance(y, (dd.Series, dd.DataFrame))
    if x_is_dask and y_is_dask:
      data_feeder_cls = DaskDataFeeder
  if _is_iterable(x):
    if y is not None and not _is_iterable(y):
      raise ValueError('Both x and y should be iterators for '
                       'streaming learning to work.')
    return StreamingDataFeeder(x, y, n_classes, batch_size)
  return data_feeder_cls(
      x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
chunk = []
for data in x:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
  """Returns an iterable for feeding into predict step.
  Args:
    x: numpy, pandas, Dask array or iterable.
    batch_size: Size of batches to split data into.
      If `None`, returns one batch of full size.
  Returns:
    List or iterator of parts of data to predict on.
  Raises:
    ValueError: if `batch_size` <= 0.
  """
  if HAS_DASK:
    x = extract_dask_data(x)
  if HAS_PANDAS:
    x = extract_pandas_data(x)
  if _is_iterable(x):
    return _batch_data(x, batch_size)
  # Promote 1-D input to a column vector so every batch is 2-D.
  if len(x.shape) == 1:
    x = np.reshape(x, (-1, 1))
  if batch_size is None:
    return [x]
  if batch_size <= 0:
    raise ValueError('Invalid batch_size %d.' % batch_size)
  num_batches = int(math.ceil(float(len(x)) / batch_size))
  return [x[b * batch_size:(b + 1) * batch_size] for b in xrange(num_batches)]
def setup_processor_data_feeder(x):
  """Sets up processor iterable.
  Args:
    x: numpy, pandas or iterable.
  Returns:
    Iterable of data to process.
  """
  # Pandas inputs are unwrapped to a plain matrix; everything else passes
  # through untouched.
  return extract_pandas_matrix(x) if HAS_PANDAS else x
def check_array(array, dtype):
  """Checks array on dtype and converts it if different.

  Args:
    array: Input array.
    dtype: Expected dtype.

  Returns:
    Original array or converted.
  """
  # skip check if array is instance of other classes, e.g. h5py.Dataset
  # to avoid copying array and loading whole data into memory
  if isinstance(array, (np.ndarray, list)):
    # np.asarray copies only when needed. The previous
    # np.array(..., copy=False) spelling changed meaning in NumPy 2.0
    # (it now raises when a copy is actually required).
    array = np.asarray(array, dtype=dtype)
  return array
def _access(data, iloc):
  """Accesses an element from collection, using integer location based indexing.
  Args:
    data: array-like. The collection to access
    iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
  Returns:
    The element of `a` found at location(s) `iloc`.
  """
  if HAS_PANDAS:
    import pandas as pd  # pylint: disable=g-import-not-at-top
    # Pandas containers use positional access via .iloc.
    if isinstance(data, (pd.Series, pd.DataFrame)):
      return data.iloc[iloc]
  return data[iloc]
def _check_dtype(dtype):
  """Returns `dtype` unchanged, warning when it is float64."""
  if dtypes.float64 == dtypes.as_dtype(dtype):
    logging.warn(
        'float64 is not supported by many models, consider casting to float32.')
  return dtype
class DataFeeder(object):
  """Data feeder is an example class to sample data for TF trainer."""
  def __init__(
      self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None,
      epochs=None):
    """Initializes a DataFeeder instance.
    Args:
      x: Feature Nd numpy matrix of shape `[n_samples, n_features, ...]`.
      y: Target vector, either floats for regression or class id for
        classification. If matrix, will consider as a sequence
        of targets. Can be `None` for unsupervised setting.
      n_classes: Number of classes, 0 and 1 are considered regression, `None`
        will pass through the input labels without one-hot conversion.
      batch_size: Mini-batch size to accumulate.
      shuffle: Whether to shuffle `x`.
      random_state: Numpy `RandomState` object to reproduce sampling.
      epochs: Number of times to iterate over input data before raising
        `StopIteration` exception.
    Attributes:
      x: Input features.
      y: Input target.
      n_classes: Number of classes (if `None`, pass through indices without
        one-hot conversion).
      batch_size: Mini-batch size to accumulate.
      input_shape: Shape of the input.
      output_shape: Shape of the output.
      input_dtype: DType of input.
      output_dtype: DType of output.
    """
    self._x = check_array(x, dtype=x.dtype)
    # self.n_classes is None means we're passing in raw target indices.
    # Class ids are stored as int64 so they can index into a one-hot matrix;
    # regression targets stay float32.
    y_dtype = (
        np.int64 if n_classes is not None and n_classes > 1 else np.float32)
    if n_classes is not None:
      self._y = (None if y is None else check_array(y, dtype=y_dtype))
    elif isinstance(y, list):
      self._y = np.array(y)
    else:
      self._y = y
    self.n_classes = n_classes
    self.max_epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        self._x.shape, None if self._y is None else self._y.shape, n_classes,
        batch_size)
    # Input dtype matches dtype of x.
    self._input_dtype = _check_dtype(self._x.dtype)
    # self.n_classes is None means we're passing in raw target indices
    if n_classes is not None or self._y is None:
      self._output_dtype = np.float32
    else:
      self._output_dtype = _check_dtype(self._y.dtype)
    self._shuffle = shuffle
    # Fixed seed (42) keeps sampling reproducible when no RandomState given.
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state
    # `indices` is the (possibly shuffled) permutation of sample positions
    # that batches are drawn from; reshuffled at every epoch boundary.
    if self._shuffle:
      self.indices = self.random_state.permutation(self._x.shape[0])
    else:
      self.indices = np.array(range(self._x.shape[0]))
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None
  @property
  def x(self):
    return self._x
  @property
  def y(self):
    return self._y
  @property
  def shuffle(self):
    return self._shuffle
  @property
  def input_dtype(self):
    return self._input_dtype
  @property
  def output_dtype(self):
    return self._output_dtype
  @property
  def batch_size(self):
    return self._batch_size
  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.
    Returns:
      The epoch placeholder.
    """
    self._epoch_placeholder = array_ops.placeholder(dtypes.int32, [1],
                                                    name='epoch')
    return self._epoch_placeholder
  def input_builder(self):
    """Builds inputs in the graph.
    Returns:
      Two placeholders for inputs and outputs.
    """
    # Leading dimension is None so the final (possibly smaller) batch fits.
    input_shape = [None] + self.input_shape[1:]
    self._input_placeholder = array_ops.placeholder(
        dtypes.as_dtype(self._input_dtype),
        input_shape,
        name='input')
    if self.output_shape is None:
      self._output_placeholder = None
    else:
      output_shape = [None] + self.output_shape[1:]
      self._output_placeholder = array_ops.placeholder(
          dtypes.as_dtype(self._output_dtype),
          output_shape,
          name='output')
    return self._input_placeholder, self._output_placeholder
  def set_placeholders(self, input_placeholder, output_placeholder):
    """Sets placeholders for this data feeder.
    Args:
      input_placeholder: Placeholder for `x` variable. Should match shape
        of the examples in the x dataset.
      output_placeholder: Placeholder for `y` variable. Should match
        shape of the examples in the y dataset. Can be None.
    """
    self._input_placeholder = input_placeholder
    self._output_placeholder = output_placeholder
  def get_feed_params(self):
    """Function returns a dict with data feed params while training.
    Returns:
      A dict with data feed params while training.
    """
    return {
        'epoch': self.epoch,
        'offset': self.offset,
        'batch_size': self._batch_size
    }
  def get_feed_dict_fn(self):
    """Returns a function that samples data into given placeholders.
    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    def _feed_dict_fn():
      """Function that samples data into given placeholders."""
      if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
        raise StopIteration
      assert self._input_placeholder is not None
      feed_dict = {}
      if self._epoch_placeholder is not None:
        feed_dict[self._epoch_placeholder.name] = [self.epoch]
      # Take next batch of indices.
      end = min(self._x.shape[0], self.offset + self._batch_size)
      batch_indices = self.indices[self.offset:end]
      # Assign input features from random indices.
      # 1-D features are reshaped into a column vector to match input_shape.
      inp = (
          np.array(_access(self._x, batch_indices)).reshape(
              (batch_indices.shape[0], 1))
          if len(self._x.shape) == 1 else _access(self._x, batch_indices))
      feed_dict[self._input_placeholder.name] = inp
      # move offset and reset it if necessary
      self.offset += self._batch_size
      if self.offset >= self._x.shape[0]:
        # Epoch boundary: reshuffle and start over.
        self.indices = self.random_state.permutation(self._x.shape[0])
        self.offset = 0
        self.epoch += 1
      # return early if there are no labels
      if self._output_placeholder is None:
        return feed_dict
      # assign labels from random indices
      # NOTE(review): mutates self.output_shape in place so the final
      # partial batch gets the right leading dimension.
      self.output_shape[0] = batch_indices.shape[0]
      out = np.zeros(self.output_shape, dtype=self._output_dtype)
      for i in xrange(out.shape[0]):
        sample = batch_indices[i]
        # self.n_classes is None means we're passing in raw target indices
        if self.n_classes is None:
          out[i] = _access(self._y, sample)
        else:
          if self.n_classes > 1:
            # One-hot encode: 2-D output gets a single 1.0 per row;
            # higher-rank output one-hot encodes each target in a sequence.
            if len(self.output_shape) == 2:
              out.itemset((i, int(_access(self._y, sample))), 1.0)
            else:
              for idx, value in enumerate(_access(self._y, sample)):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = _access(self._y, sample)
      feed_dict[self._output_placeholder.name] = out
      return feed_dict
    return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
  """Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows to read data as it comes from disk or
  somewhere else. It's custom to have these iterators rotate infinitely over
  the dataset, to allow control of how much to learn on the trainer side.
  """
  def __init__(self, x, y, n_classes, batch_size):
    """Initializes a StreamingDataFeeder instance.
    Args:
      x: iterator that returns for each element, returns features.
      y: iterator that returns for each element, returns 1 or many classes /
        regression values.
      n_classes: indicator of how many classes the target has.
      batch_size: Mini batch size to accumulate.
    Attributes:
      x: input features.
      y: input target.
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input.
      output_shape: shape of the output.
      input_dtype: dtype of input.
      output_dtype: dtype of output.
    """
    # pylint: disable=invalid-name,super-init-not-called
    # Peek at the first element to infer shapes/dtypes, then chain it back
    # in front so the stream is not consumed.
    x_first_el = six.next(x)
    self._x = itertools.chain([x_first_el], x)
    if y is not None:
      y_first_el = six.next(y)
      self._y = itertools.chain([y_first_el], y)
    else:
      y_first_el = None
      self._y = None
    self.n_classes = n_classes
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        [1] + list(x_first_el.shape),
        [1] + list(y_first_el.shape) if y is not None else None,
        n_classes,
        batch_size)
    self._input_dtype = _check_dtype(x_first_el.dtype)
    # Output types are floats, due to both softmaxes and regression req.
    if n_classes is not None and n_classes > 0:
      self._output_dtype = np.float32
    elif y is not None:
      # Infer the dtype from the first target element (or its first item
      # when targets are sequences).
      if isinstance(y_first_el, list) or isinstance(y_first_el, np.ndarray):
        self._output_dtype = _check_dtype(np.dtype(type(y_first_el[0])))
      else:
        self._output_dtype = _check_dtype(np.dtype(type(y_first_el)))
    # NOTE(review): when y is None and n_classes is None, _output_dtype is
    # never set -- confirm no caller reads it in that configuration.
  def get_feed_params(self):
    """Function returns a dict with data feed params while training.
    Returns:
      A dict with data feed params while training.
    """
    return {'batch_size': self._batch_size}
  def get_feed_dict_fn(self):
    """Returns a function, that will sample data and provide it to placeholders.
    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    self.stopped = False
    def _feed_dict_fn():
      """Samples data and provides it to placeholders.
      Returns:
        Dict of input and output tensors.
      """
      if self.stopped:
        raise StopIteration
      inp = np.zeros(self.input_shape, dtype=self._input_dtype)
      if self._y is not None:
        out = np.zeros(self.output_shape, dtype=self._output_dtype)
      for i in xrange(self._batch_size):
        # Add handling when queue ends.
        try:
          inp[i, :] = six.next(self._x)
        except StopIteration:
          # Truncate the batch to the rows actually filled and raise on the
          # *next* call instead of mid-batch.
          self.stopped = True
          inp = inp[:i, :]
          if self._y is not None:
            out = out[:i]
          break
        if self._y is not None:
          y = six.next(self._y)
          if self.n_classes is not None and self.n_classes > 1:
            # One-hot encode; rank-3 output encodes each item of a sequence.
            if len(self.output_shape) == 2:
              out.itemset((i, y), 1.0)
            else:
              for idx, value in enumerate(y):
                out.itemset(tuple([i, idx, value]), 1.0)
          else:
            out[i] = y
      if self._y is None:
        return {self._input_placeholder.name: inp}
      return {self._input_placeholder.name: inp,
              self._output_placeholder.name: out}
    return _feed_dict_fn
class DaskDataFeeder(object):
  """Data feeder for that reads data from dask.Series and dask.DataFrame.
  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder will remove requirement to have full dataset in the
  memory and still do random seeks for sampling of batches.
  """
  def __init__(self, x, y, n_classes, batch_size, shuffle=True,
               random_state=None, epochs=None):
    """Initializes a DaskDataFeeder instance.
    Args:
      x: iterator that returns for each element, returns features.
      y: iterator that returns for each element, returns 1 or many classes /
        regression values.
      n_classes: indicator of how many classes the target has.
      batch_size: Mini batch size to accumulate.
      shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate so use a
        int value for this if you want consistent sized batches.
      epochs: Number of epochs to run.
    Attributes:
      x: input features.
      y: input target.
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input.
      output_shape: shape of the output.
      input_dtype: dtype of input.
      output_dtype: dtype of output.
    """
    # pylint: disable=invalid-name,super-init-not-called
    import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
    # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
    self._x = x
    self._y = y
    # save column names
    self._x_columns = list(x.columns)
    if isinstance(y.columns[0], str):
      self._y_columns = list(y.columns)
    else:
      # deal with cases where two DFs have overlapped default numeric colnames
      # by renaming y's single column to an index past x's columns.
      self._y_columns = len(self._x_columns) + 1
      self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
    # TODO(terrytangyuan): deal with unsupervised cases
    # combine into a data frame
    self.df = dd.multi.concat([self._x, self._y], axis=1)
    self.n_classes = n_classes
    x_count = x.count().compute()[0]
    x_shape = (x_count, len(self._x.columns))
    y_shape = (x_count, len(self._y.columns))
    # TODO(terrytangyuan): Add support for shuffle and epochs.
    self._shuffle = shuffle
    self.epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Fraction of the dataset one batch represents; used by random_split.
    self.sample_fraction = self._batch_size / float(x_count)
    self._input_dtype = _check_dtype(self._x.dtypes[0])
    self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
    if random_state is None:
      self.random_state = 66
    else:
      self.random_state = random_state
  def get_feed_params(self):
    """Function returns a dict with data feed params while training.
    Returns:
      A dict with data feed params while training.
    """
    return {'batch_size': self._batch_size}
  def get_feed_dict_fn(self, input_placeholder, output_placeholder):
    """Returns a function, that will sample data and provide it to placeholders.
    Args:
      input_placeholder: tf.Placeholder for input features mini batch.
      output_placeholder: tf.Placeholder for output targets.
    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    def _feed_dict_fn():
      """Samples data and provides it to placeholders."""
      # TODO(ipolosukhin): option for with/without replacement (dev version of
      # dask)
      # NOTE(review): the same integer random_state is passed on every call,
      # which looks like it yields the same split each time -- confirm
      # whether fresh batches are intended here.
      sample = self.df.random_split(
          [self.sample_fraction, 1 - self.sample_fraction],
          random_state=self.random_state)
      inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
      out = extract_pandas_matrix(sample[0][self._y_columns].compute())
      # convert to correct dtype
      inp = np.array(inp, dtype=self._input_dtype)
      # one-hot encode out for each class for cross entropy loss
      if HAS_PANDAS:
        import pandas as pd  # pylint: disable=g-import-not-at-top
        if not isinstance(out, pd.Series):
          out = out.flatten()
      # Width of the one-hot matrix comes from the max label value in y.
      out_max = self._y.max().compute().values[0]
      encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
      encoded_out[np.arange(out.size), out] = 1
      return {input_placeholder.name: inp,
              output_placeholder.name: encoded_out}
    return _feed_dict_fn
| |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
import datetime
import json
import webapp2
import jinja2
import urllib
from google.appengine.api import users
from poker.models import *
from poker.firebase import create_custom_token, send_firebase_message
# Module-level Jinja2 environment: loads templates from ../templates relative
# to this file, with autoescaping enabled for all rendered output.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader = jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), '../templates')),
    extensions = [
        'jinja2.ext.autoescape',
    ],
    autoescape = True
)
# Public request handlers exported by this module (wired up in the router).
__all__ = [
    'MainPage',
    'NewGame',
    'GameList',
    'DeleteGame',
    'GamePage',
    'GameOpened',
    'ToggleCompleteGame',
    'NewStory',
    'SkipStory',
    'CompleteStory',
    'NewRound',
    'CompleteRound',
    'EstimateRound',
    'ToggleGameObserver',
    'DeleteParticipant',
    'GameClosed',
]
class Player():
    """Wraps the current App Engine user plus profile-derived display data."""
    user = None
    profile = None
    def __init__(self):
        current_user = users.get_current_user()
        if current_user:
            self.user = current_user
    def get_user(self):
        return self.user
    def get_url(self, dest_url):
        # Signed-in players get a logout link; anonymous visitors get a
        # login link that returns them to dest_url.
        if self.user:
            return users.create_logout_url('/')
        return users.create_login_url(dest_url)
    def get_games(self):
        return Game.all().filter("user =", self.user)
    def get_profile(self):
        # Profile lookup is not implemented; name/photo helpers tolerate None.
        return None
    def get_name(self):
        info = self.get_profile()
        return info['displayName'] if info and 'displayName' in info else None
    def get_photo(self):
        info = self.get_profile()
        return info['image']['url'] if info and 'image' in info else None
class PokerRequestHandler(webapp2.RequestHandler):
    """Base handler with helpers for resolving the player and datastore entities.

    Lookup helpers abort the request instead of returning None:
    404 for missing entities, 401 for missing auth, 403 for wrong owner.
    """
    player = None
    def get_player(self):
        # Bug fix: the original assigned a local `player` and never cached it
        # on self; worse, once self.player was set the local was unbound and
        # the return raised NameError.
        if self.player is None:
            self.player = Player()
        return self.player
    def get_user(self, abort = True):
        """Returns the signed-in user; aborts with 401 when required and absent."""
        user = self.get_player().get_user()
        if abort and not user:
            self.abort(401)
        return user
    def get_game(self, game_id, check_user = False):
        """Loads a Game by id; 404 if missing, 403 if check_user and not owner."""
        game = Game.get_by_id(int(game_id))
        if not game:
            self.abort(404)
        if check_user:
            user = self.get_player().get_user()
            if game.user != user:
                self.abort(403)
        return game
    def get_story(self, game_id, story_id, check_user = False):
        """Loads a Story under its Game; 404 if missing."""
        game = self.get_game(game_id, check_user)
        story = Story.get_by_id(int(story_id), game)
        if not story:
            self.abort(404)
        return story
    def get_round(self, game_id, story_id, round_id, check_user = False):
        """Loads a Round under its Story; 404 if missing."""
        story = self.get_story(game_id, story_id, check_user)
        round = Round.get_by_id(int(round_id), story)
        if not round:
            self.abort(404)
        return round
    def get_participant(self, game_id, participant_key, check_user = False):
        """Loads a Participant by key name under its Game; 404 if missing."""
        game = self.get_game(game_id, check_user)
        participant = Participant.get_by_key_name(str(participant_key), game)
        if not participant:
            self.abort(404)
        return participant
class MainPage(PokerRequestHandler):
    """Landing page: signed-in users go straight to their game list."""
    def get(self):
        player = self.get_player()
        user = player.get_user()
        if user:
            return self.redirect('/game/list')
        template = JINJA_ENVIRONMENT.get_template('index.html')
        context = {
            'user': user,
            'url': player.get_url(self.request.uri),
            'now': datetime.datetime.now(),
        }
        self.response.write(template.render(context))
class NewGame(PokerRequestHandler):
    """Creates a game from the POSTed name/deck and redirects to its page."""
    def post(self):
        user = self.get_user()
        try:
            game = Game(
                name = self.request.get('name'),
                deck = int(self.request.get('deck')),
                user = user
            )
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; bad form input still falls back home.
            return self.redirect('/')
        game.put()
        return self.redirect(game.get_url())
class GameList(PokerRequestHandler):
    """Shows the signed-in player's games, newest first."""
    def get(self):
        player = self.get_player()
        template = JINJA_ENVIRONMENT.get_template('list.html')
        self.response.write(template.render({
            'user': player.get_user(),
            'player_name': player.get_name(),
            'player_photo': player.get_photo(),
            'url': player.get_url(self.request.uri),
            'games': player.get_games().order("-created"),
            'decks': Game.DECK_CHOICES,
            'now': datetime.datetime.now(),
        }))
class DeleteGame(PokerRequestHandler):
    """Deletes a game owned by the current user, then returns home."""
    def get(self, game_id):
        self.get_game(game_id, check_user = True).delete()
        return self.redirect('/')
class GamePage(PokerRequestHandler):
    """Renders the game board and registers the viewer as a participant."""
    def get(self, game_id):
        user = self.get_user()
        player = self.get_player()
        game = self.get_game(game_id)
        # Participant keys combine game id and user id, so each user joins a
        # given game at most once; the same key doubles as the channel id.
        channel_id = str(game.key().id()) + str(user.user_id())
        participant = Participant.get_or_insert(
            channel_id,
            parent = game,
            user = user
        )
        if not participant.name or not participant.photo:
            participant.name = player.get_name()
            participant.photo = player.get_photo()
            participant.put()
        context = {
            'user': user,
            'player_name': player.get_name(),
            'player_photo': player.get_photo(),
            'game': game,
            'deck': json.dumps(game.get_deck()),
            'url': player.get_url(self.request.uri),
            'now': datetime.datetime.now(),
            'request_url': self.request.url,
            'token': create_custom_token(channel_id),
            'channel_id': channel_id,
            'initial_message': urllib.unquote(json.dumps(game.get_message())),
        }
        template = JINJA_ENVIRONMENT.get_template('game.html')
        self.response.write(template.render(context))
class GameOpened(PokerRequestHandler):
    """Client callback fired when a game page connects; replies with the
    caller's existing estimates."""
    def post(self, game_id):
        user = self.get_user()
        game = self.get_game(game_id)
        game.send_update()
        payload = json.dumps({
            'estimates': game.get_user_estimates(user),
        })
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(payload)
class ToggleCompleteGame(PokerRequestHandler):
    """Marks a game complete or incomplete; completing closes all open work."""
    def get(self, game_id, toggle):
        self.post(game_id, toggle)
        return self.redirect('/game/list')
    def post(self, game_id, toggle):
        game = self.get_game(game_id, check_user = True)
        game.completed = (toggle == 'complete')
        if game.completed:
            # Completing force-finishes every round and marks any story that
            # never got an estimate as skipped.
            for story in game.get_stories():
                for round in story.get_rounds():
                    round.completed = True
                    round.put()
                if story.estimate is None:
                    story.estimate = Story.SKIPPED
                    story.put()
            game.current_story_id = None
        game.put()
        game.send_update()
class NewStory(PokerRequestHandler):
    """Starts a new story (with its first round) on an open game."""
    def post(self, game_id):
        game = self.get_game(game_id, check_user = True)
        # Only one story may be in play at a time, and only on open games.
        if game.completed or game.get_current_story():
            self.abort(403)
        try:
            story = Story(
                parent = game,
                name = self.request.get('name')
            )
        except Exception:
            # Narrowed from a bare `except:`; invalid input is a client error.
            self.abort(400)
        story.put()
        story.new_round()
        game.current_story_id = story.key().id()
        game.put()
        game.send_update()
class SkipStory(PokerRequestHandler):
    """Skips a story: finishes its rounds and records a SKIPPED estimate."""
    def post(self, game_id, story_id):
        story = self.get_story(game_id, story_id, check_user = True)
        for round in story.get_rounds():
            round.completed = True
            round.put()
        story.estimate = Story.SKIPPED
        story.put()
        game = story.parent()
        game.current_story_id = None
        game.put()
        game.send_update()
class CompleteStory(PokerRequestHandler):
    """Records the chosen card as the story's final estimate."""
    def post(self, game_id, story_id):
        story = self.get_story(game_id, story_id, check_user = True)
        game = story.parent()
        if game.completed or not story.is_current():
            self.abort(403)
        deck = game.get_deck()
        try:
            card = int(self.request.get('card'))
        except ValueError:
            self.abort(400)
        try:
            deck[card]  # validate the card index against the deck
        except IndexError:
            self.abort(400)
        for round in story.get_rounds():
            round.completed = True
            round.put()
        story.estimate = card
        story.put()
        game.current_story_id = None
        game.put()
        game.send_update()
class NewRound(PokerRequestHandler):
    """Opens another estimation round on the current story."""
    def post(self, game_id, story_id):
        story = self.get_story(game_id, story_id, check_user = True)
        game = story.parent()
        # Rounds can only be added to the game's current, open story.
        if game.completed or not story.is_current():
            self.abort(403)
        story.new_round()
        game.send_update()
class CompleteRound(PokerRequestHandler):
    """Force-finishes a round on the current story."""
    def post(self, game_id, story_id, round_id):
        # `current_round` avoids shadowing the builtin `round`.
        current_round = self.get_round(game_id, story_id, round_id, check_user = True)
        story = current_round.parent()
        game = story.parent()
        if game.completed or not story.is_current():
            self.abort(403)
        current_round.completed = True
        current_round.put()
        game.send_update()
class EstimateRound(PokerRequestHandler):
    """Records (or updates) the current user's card for an open round."""
    def post(self, game_id, story_id, round_id):
        # `current_round` avoids shadowing the builtin `round`.
        current_round = self.get_round(game_id, story_id, round_id)
        user = self.get_user()
        story = current_round.parent()
        game = story.parent()
        if game.completed or not story.is_current() or current_round.completed:
            self.abort(403)
        deck = game.get_deck()
        try:
            card = int(self.request.get('card'))
        except ValueError:
            self.abort(400)
        try:
            deck[card]  # validate the card index against the deck
        except IndexError:
            self.abort(400)
        estimate_key = str(current_round.key().id()) + str(user.user_id())
        estimate = Estimate.get_or_insert(
            estimate_key,
            parent = current_round,
            user = user,
            card = card
        )
        # Bug fix: get_or_insert only applies the constructor kwargs when the
        # entity is *created*, so re-assign the card to let a player change an
        # existing vote before re-saving.
        estimate.card = card
        estimate.put()
        # Auto-complete the round once every non-observer has voted.
        count_participants = game.get_participants().filter('observer =', False).count()
        count_estimates = current_round.get_estimates().count()
        if count_participants == count_estimates:
            current_round.completed = True
            current_round.put()
        game.send_update(force = current_round.completed, user = user)
class ToggleGameObserver(PokerRequestHandler):
    """Flips a participant between voter and observer."""
    def post(self, game_id, participant_key, observer):
        participant = self.get_participant(game_id, participant_key, check_user = True)
        participant.observer = (observer == 'observer')
        participant.put()
        game = participant.parent()
        game.send_update()
class DeleteParticipant(PokerRequestHandler):
    """Removes a participant from a game; the game owner cannot be removed."""
    def post(self, game_id, participant_key):
        participant = self.get_participant(game_id, participant_key, check_user = True)
        game = participant.parent()
        if participant.user == game.user:
            self.abort(403)
        participant.delete()
        game.send_update()
class GameClosed(PokerRequestHandler):
    """Client callback when a game page disconnects; clears its channel."""
    def post(self, game_id):
        game = self.get_game(game_id)
        user = self.get_user()
        # Channel id mirrors the participant key: game id + user id.
        channel_id = str(game.key().id()) + str(user.user_id())
        send_firebase_message(channel_id, None)
| |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestQueryResults(unittest2.TestCase):
PROJECT = 'project'
JOB_NAME = 'job_name'
JOB_NAME = 'test-synchronous-query'
JOB_TYPE = 'query'
QUERY = 'select count(*) from persons'
TOKEN = 'TOKEN'
def _getTargetClass(self):
from gcloud.bigquery.query import QueryResults
return QueryResults
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _makeResource(self, complete=False):
resource = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'jobComplete': complete,
'errors': [],
'schema': {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'},
],
},
}
if complete:
resource['totalRows'] = 1000
resource['rows'] = [
{'f': [
{'v': 'Phred Phlyntstone'},
{'v': 32},
]},
{'f': [
{'v': 'Bharney Rhubble'},
{'v': 33},
]},
{'f': [
{'v': 'Wylma Phlyntstone'},
{'v': 29},
]},
{'f': [
{'v': 'Bhettye Rhubble'},
{'v': 27},
]},
]
resource['pageToken'] = self.TOKEN
resource['totalBytesProcessed'] = 100000
resource['cacheHit'] = False
return resource
def _verifySchema(self, query, resource):
from gcloud.bigquery.table import SchemaField
if 'schema' in resource:
fields = resource['schema']['fields']
self.assertEqual(len(query.schema), len(fields))
for found, expected in zip(query.schema, fields):
self.assertTrue(isinstance(found, SchemaField))
self.assertEqual(found.name, expected['name'])
self.assertEqual(found.field_type, expected['type'])
self.assertEqual(found.mode, expected['mode'])
self.assertEqual(found.description,
expected.get('description'))
self.assertEqual(found.fields, expected.get('fields'))
else:
self.assertTrue(query.schema is None)
def _verifyRows(self, query, resource):
expected = resource.get('rows')
if expected is None:
self.assertEqual(query.rows, [])
else:
found = query.rows
self.assertEqual(len(found), len(expected))
for f_row, e_row in zip(found, expected):
self.assertEqual(f_row,
tuple([cell['v'] for cell in e_row['f']]))
def _verifyResourceProperties(self, query, resource):
self.assertEqual(query.cache_hit, resource.get('cacheHit'))
self.assertEqual(query.complete, resource.get('jobComplete'))
self.assertEqual(query.errors, resource.get('errors'))
self.assertEqual(query.page_token, resource.get('pageToken'))
self.assertEqual(query.total_rows, resource.get('totalRows'))
self.assertEqual(query.total_bytes_processed,
resource.get('totalBytesProcessed'))
if 'jobReference' in resource:
self.assertEqual(query.name, resource['jobReference']['jobId'])
else:
self.assertTrue(query.name is None)
self._verifySchema(query, resource)
self._verifyRows(query, resource)
def test_ctor(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self.assertEqual(query.query, self.QUERY)
self.assertTrue(query._client is client)
self.assertTrue(query.cache_hit is None)
self.assertTrue(query.complete is None)
self.assertTrue(query.errors is None)
self.assertTrue(query.name is None)
self.assertTrue(query.page_token is None)
self.assertEqual(query.rows, [])
self.assertTrue(query.schema is None)
self.assertTrue(query.total_rows is None)
self.assertTrue(query.total_bytes_processed is None)
self.assertTrue(query.default_dataset is None)
self.assertTrue(query.max_results is None)
self.assertTrue(query.preserve_nulls is None)
self.assertTrue(query.use_query_cache is None)
def test_job_wo_jobid(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self.assertTrue(query.job is None)
def test_job_w_jobid(self):
from gcloud.bigquery.job import QueryJob
SERVER_GENERATED = 'SERVER_GENERATED'
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
query._properties['jobReference'] = {
'projectId': self.PROJECT,
'jobId': SERVER_GENERATED,
}
job = query.job
self.assertTrue(isinstance(job, QueryJob))
self.assertEqual(job.query, self.QUERY)
self.assertTrue(job._client is client)
self.assertEqual(job.name, SERVER_GENERATED)
fetched_later = query.job
self.assertTrue(fetched_later is job)
def test_schema(self):
client = _Client(self.PROJECT)
query = self._makeOne(self.QUERY, client)
self._verifyResourceProperties(query, {})
resource = {
'schema': {
'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQURED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQURED'},
],
},
}
query._set_properties(resource)
self._verifyResourceProperties(query, resource)
def test_run_w_bound_client(self):
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=False)
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query.run()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {'query': self.QUERY}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_run_w_alternate_client(self):
PATH = 'projects/%s/queries' % self.PROJECT
RESOURCE = self._makeResource(complete=True)
DATASET = 'test_dataset'
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
query = self._makeOne(self.QUERY, client1)
query.default_dataset = client2.dataset(DATASET)
query.max_results = 100
query.preserve_nulls = True
query.timeout_ms = 20000
query.use_query_cache = False
query.run(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'query': self.QUERY,
'defaultDataset': {
'projectId': self.PROJECT,
'datasetId': DATASET,
},
'maxResults': 100,
'preserveNulls': True,
'timeoutMs': 20000,
'useQueryCache': False,
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(query, RESOURCE)
def test_fetch_data_query_not_yet_run(self):
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
self.assertRaises(ValueError, query.fetch_data)
def test_fetch_data_w_bound_client(self):
PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME)
BEFORE = self._makeResource(complete=False)
AFTER = self._makeResource(complete=True)
conn = _Connection(AFTER)
client = _Client(project=self.PROJECT, connection=conn)
query = self._makeOne(self.QUERY, client)
query._set_properties(BEFORE)
self.assertFalse(query.complete)
rows, total_rows, page_token = query.fetch_data()
self.assertTrue(query.complete)
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], ('Phred Phlyntstone', 32))
self.assertEqual(rows[1], ('Bharney Rhubble', 33))
self.assertEqual(rows[2], ('Wylma Phlyntstone', 29))
self.assertEqual(rows[3], ('Bhettye Rhubble', 27))
self.assertEqual(total_rows, AFTER['totalRows'])
self.assertEqual(page_token, AFTER['pageToken'])
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
def test_fetch_data_w_alternate_client(self):
PATH = 'projects/%s/queries/%s' % (self.PROJECT, self.JOB_NAME)
MAX = 10
TOKEN = 'TOKEN'
START = 2257
TIMEOUT = 20000
BEFORE = self._makeResource(complete=False)
AFTER = self._makeResource(complete=True)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(AFTER)
client2 = _Client(project=self.PROJECT, connection=conn2)
query = self._makeOne(self.QUERY, client1)
query._set_properties(BEFORE)
self.assertFalse(query.complete)
rows, total_rows, page_token = query.fetch_data(
client=client2, max_results=MAX, page_token=TOKEN,
start_index=START, timeout_ms=TIMEOUT)
self.assertTrue(query.complete)
self.assertEqual(len(rows), 4)
self.assertEqual(rows[0], ('Phred Phlyntstone', 32))
self.assertEqual(rows[1], ('Bharney Rhubble', 33))
self.assertEqual(rows[2], ('Wylma Phlyntstone', 29))
self.assertEqual(rows[3], ('Bhettye Rhubble', 27))
self.assertEqual(total_rows, AFTER['totalRows'])
self.assertEqual(page_token, AFTER['pageToken'])
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'],
{'maxResults': MAX,
'pageToken': TOKEN,
'startIndex': START,
'timeoutMs': TIMEOUT})
class _Client(object):
def __init__(self, project='project', connection=None):
self.project = project
self.connection = connection
def dataset(self, name):
from gcloud.bigquery.dataset import Dataset
return Dataset(name, client=self)
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from gcloud.exceptions import NotFound
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
| |
"""Sensor platform for hvv."""
from datetime import timedelta
import logging
from aiohttp import ClientConnectorError
from pygti.exceptions import InvalidAuth
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import Throttle
from homeassistant.util.dt import get_time_zone, utcnow
from .const import ATTRIBUTION, CONF_STATION, DOMAIN, MANUFACTURER
# Throttle interval applied to async_update via @Throttle.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
# Maximum number of departures requested per departureList call.
MAX_LIST = 20
# maxTimeOffset passed to the GTI API — presumably minutes; TODO confirm
# against the pygti/Geofox documentation.
MAX_TIME_OFFSET = 360
ICON = "mdi:bus"
# Keys used in the extra state attributes of the sensor.
ATTR_DEPARTURE = "departure"
ATTR_LINE = "line"
ATTR_ORIGIN = "origin"
ATTR_DIRECTION = "direction"
ATTR_TYPE = "type"
ATTR_DELAY = "delay"
ATTR_NEXT = "next"
PARALLEL_UPDATES = 0
# The GTI API expects local (Europe/Berlin) date/time strings.
BERLIN_TIME_ZONE = get_time_zone("Europe/Berlin")
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_devices: AddEntitiesCallback,
) -> None:
    """Set up the HVV departure sensor from a config entry."""
    hub = hass.data[DOMAIN][config_entry.entry_id]
    session = aiohttp_client.async_get_clientsession(hass)
    async_add_devices(
        [HVVDepartureSensor(hass, config_entry, session, hub)], True
    )
class HVVDepartureSensor(SensorEntity):
    """Sensor reporting the next departure at a configured HVV station."""

    def __init__(self, hass, config_entry, session, hub):
        """Initialize."""
        self.config_entry = config_entry
        self.station_name = self.config_entry.data[CONF_STATION]["name"]
        self.attr = {ATTR_ATTRIBUTION: ATTRIBUTION}
        self._available = False
        self._state = None
        self._name = f"Departures at {self.station_name}"
        # Remembers the last error class/instance so repeated failures are
        # logged only once until the connection recovers.
        self._last_error = None
        self.gti = hub.gti

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self, **kwargs):
        """Update the sensor."""
        departure_time = utcnow() + timedelta(
            minutes=self.config_entry.options.get("offset", 0)
        )
        # The GTI API expects local (Europe/Berlin) date/time strings.
        departure_time_tz_berlin = departure_time.astimezone(BERLIN_TIME_ZONE)
        payload = {
            "station": self.config_entry.data[CONF_STATION],
            "time": {
                "date": departure_time_tz_berlin.strftime("%d.%m.%Y"),
                "time": departure_time_tz_berlin.strftime("%H:%M"),
            },
            "maxList": MAX_LIST,
            "maxTimeOffset": MAX_TIME_OFFSET,
            "useRealtime": self.config_entry.options.get("realtime", False),
        }
        if "filter" in self.config_entry.options:
            payload.update({"filter": self.config_entry.options["filter"]})
        try:
            data = await self.gti.departureList(payload)
        except InvalidAuth as error:
            if self._last_error != InvalidAuth:
                _LOGGER.error("Authentication failed: %r", error)
                self._last_error = InvalidAuth
            self._available = False
            # BUGFIX: bail out here — ``data`` is unbound after a failure,
            # so falling through would raise UnboundLocalError below.
            return
        except ClientConnectorError as error:
            if self._last_error != ClientConnectorError:
                _LOGGER.warning("Network unavailable: %r", error)
                self._last_error = ClientConnectorError
            self._available = False
            return
        except Exception as error:  # pylint: disable=broad-except
            if self._last_error != error:
                _LOGGER.error("Error occurred while fetching data: %r", error)
                self._last_error = error
            self._available = False
            return
        if not (data["returnCode"] == "OK" and data.get("departures")):
            self._available = False
            return
        if self._last_error == ClientConnectorError:
            _LOGGER.debug("Network available again")
        self._last_error = None
        # The first departure drives the sensor state and main attributes.
        departure = data["departures"][0]
        line = departure["line"]
        delay = departure.get("delay", 0)
        self._available = True
        self._state = (
            departure_time
            + timedelta(minutes=departure["timeOffset"])
            + timedelta(seconds=delay)
        )
        self.attr.update(
            {
                ATTR_LINE: line["name"],
                ATTR_ORIGIN: line["origin"],
                ATTR_DIRECTION: line["direction"],
                ATTR_TYPE: line["type"]["shortInfo"],
                ATTR_ID: line["id"],
                ATTR_DELAY: delay,
            }
        )
        # All upcoming departures are exposed under the "next" attribute.
        departures = []
        for departure in data["departures"]:
            line = departure["line"]
            delay = departure.get("delay", 0)
            departures.append(
                {
                    ATTR_DEPARTURE: departure_time
                    + timedelta(minutes=departure["timeOffset"])
                    + timedelta(seconds=delay),
                    ATTR_LINE: line["name"],
                    ATTR_ORIGIN: line["origin"],
                    ATTR_DIRECTION: line["direction"],
                    ATTR_TYPE: line["type"]["shortInfo"],
                    ATTR_ID: line["id"],
                    ATTR_DELAY: delay,
                }
            )
        self.attr[ATTR_NEXT] = departures

    @property
    def unique_id(self):
        """Return a unique ID to use for this sensor."""
        station_id = self.config_entry.data[CONF_STATION]["id"]
        station_type = self.config_entry.data[CONF_STATION]["type"]
        return f"{self.config_entry.entry_id}-{station_id}-{station_type}"

    @property
    def device_info(self):
        """Return the device info for this sensor."""
        return DeviceInfo(
            identifiers={
                (
                    DOMAIN,
                    self.config_entry.entry_id,
                    self.config_entry.data[CONF_STATION]["id"],
                    self.config_entry.data[CONF_STATION]["type"],
                )
            },
            manufacturer=MANUFACTURER,
            name=self._name,
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return ICON

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return SensorDeviceClass.TIMESTAMP

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return self.attr
| |
"""Legacy code from EPlusInterface"""
# Copyright (C) 2004 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
#this is a test version ... not for real use
#dammit i am using it
import copy
import eppy.EPlusInterfaceFunctions.mylib2 as mylib2
import eppy.EPlusInterfaceFunctions.mylib3 as mylib3
def removecomment(astr, cphrase):
    """Strip ``cphrase`` comments from ``astr``.

    Works like Python's ``#`` comments: any character from ``cphrase`` to
    the end of the line is discarded.

    :param astr: the string to be de-commented
    :param cphrase: the comment phrase (e.g. ``'!'`` for IDF files)
    :returns: ``astr`` without comments, original line separator preserved
    """
    linesep = mylib3.getlinesep(astr)
    decommented = [line.split(cphrase, 1)[0] for line in astr.split(linesep)]
    return linesep.join(decommented)
class Idd(object):
    """Minimal IDD wrapper.

    Builds two parallel structures: ``dt``, a dict mapping upper-cased
    object names to (initially empty) lists, and ``dtls``, the names in
    encounter order.
    """

    def __init__(self, dictfile, version=2):
        """With ``version == 2`` (the default) ``dictfile`` is an
        already-parsed sequence of records; otherwise it is the path of an
        IDD file to read and parse."""
        if version == 2:
            # version == 2 is just a flag; it may wind up being the only
            # supported path, at which point the other branch can go away.
            self.dt, self.dtls = self.initdict2(dictfile)
            return
        self.dt, self.dtls = self.initdict(dictfile)

    def initdict2(self, dictfile):
        """Build (dt, dtls) from an already-parsed sequence of records."""
        dt = {}
        dtls = []
        for element in dictfile:
            key = element[0].upper()  # dict keys for objects always in caps
            dt[key] = []
            dtls.append(key)
        return dt, dtls

    def initdict(self, fname):
        """Build (dt, dtls) by reading and parsing the IDD file ``fname``."""
        astr = mylib2.readfile(fname)
        nocom = removecomment(astr, '!')
        # Records are ';'-terminated; fields within a record are ','-separated.
        records = [record.split(',') for record in nocom.split(';')]
        dt = {}
        dtls = []
        for record in records:
            name = record[0].strip().upper()
            if name == '':
                continue
            dt[name] = []
            dtls.append(name)
        return dt, dtls
class Eplusdata(object):
    """Container for EnergyPlus IDF data.

    ``dt`` maps upper-cased object names to lists of records (each record
    is a list of field strings); ``dtls`` keeps the names in order.
    """
    def __init__(self, dictfile=None, fname=None):
        # Dispatch on the combination of argument types:
        #   nothing given      -> empty data
        #   dictfile only      -> blank dictionary (from an Idd or IDD path)
        #   dictfile + fname   -> dictionary filled from the IDF file
        # ``fname`` may be a path, an open file object, or a StringIO.
        # import pdb; pdb.set_trace()
        if fname == None and dictfile == None:
            self.dt, self.dtls = {}, []
        if isinstance(dictfile, str) and fname == None:
            self.initdict(dictfile)
        if isinstance(dictfile, Idd) and fname == None:
            self.initdict(dictfile)
        if isinstance(fname, str) and isinstance(dictfile, str):
            fnamefobject = open(fname, 'rb')
            self.makedict(dictfile, fnamefobject)
        if isinstance(fname, str) and isinstance(dictfile, Idd):
            fnamefobject = open(fname, 'rb')
            self.makedict(dictfile, fnamefobject)
        from io import StringIO
        try:
            # Will fail on Python 3: ``file`` is not defined there, raising
            # NameError, which the except branch below handles.
            if isinstance(
                fname, (file, StringIO)) and isinstance(dictfile, str):
                self.makedict(dictfile, fname)
            if isinstance(
                fname, (file, StringIO)) and isinstance(dictfile, Idd):
                self.makedict(dictfile, fname)
        except NameError:
            # Python 3 path: match any readable object via IOBase instead.
            from io import IOBase
            if isinstance(
                fname, (IOBase, StringIO)) and isinstance(dictfile, str):
                self.makedict(dictfile, fname)
            if isinstance(
                fname, (IOBase, StringIO)) and isinstance(dictfile, Idd):
                self.makedict(dictfile, fname)
    def __repr__(self):
        """Render the data as IDF text: comma-separated fields, records
        terminated by ';' and separated by a blank line."""
        #print dictionary
        dt = self.dt
        dtls = self.dtls
        DOSSEP = mylib3.UNIXSEP # using a unix EOL
        astr = ''
        for node in dtls:
            nodedata = dt[node.upper()]
            for block in nodedata:
                for i in range(len(block)):
                    # Default: indented continuation field.
                    fformat = ' %s,'+ DOSSEP
                    if i == 0:
                        # First field (the object name) is not indented.
                        fformat = '%s,'+ DOSSEP
                    if i == len(block)-1:
                        # Last field ends the record and adds a blank line.
                        fformat = ' %s;'+ DOSSEP*2
                    astr = astr+ fformat %block[i]
        return astr
    #------------------------------------------
    def initdict(self, fname):
        """create a blank dictionary

        ``fname`` may be an Idd instance (its tables are reused directly)
        or the path of an IDD file to read and parse."""
        if isinstance(fname, Idd):
            self.dt, self.dtls = fname.dt, fname.dtls
            return self.dt, self.dtls
        astr = mylib2.readfile(fname)
        nocom = removecomment(astr, '!')
        idfst = nocom
        # Records are ';'-terminated; fields are ','-separated.
        alist = idfst.split(';')
        lss = []
        for element in alist:
            lst = element.split(',')
            lss.append(lst)
        for i in range(0, len(lss)):
            for j in range(0, len(lss[i])):
                lss[i][j] = lss[i][j].strip()
        dt = {}
        dtls = []
        for element in lss:
            if element[0] == '':
                continue
            dt[element[0].upper()] = []
            dtls.append(element[0].upper())
        self.dt, self.dtls = dt, dtls
        return dt, dtls
    #------------------------------------------
    def makedict(self, dictfile, fnamefobject):
        """stuff file data into the blank dictionary"""
        #fname = './exapmlefiles/5ZoneDD.idf'
        #fname = './1ZoneUncontrolled.idf'
        if isinstance(dictfile, Idd):
            # Deep-copy so the caller's Idd tables are not mutated below.
            localidd = copy.deepcopy(dictfile)
            dt, dtls = localidd.dt, localidd.dtls
        else:
            dt, dtls = self.initdict(dictfile)
        # astr = mylib2.readfile(fname)
        astr = fnamefobject.read()
        try:
            # Bytes from a file opened 'rb' must be decoded; str input
            # (e.g. from StringIO) has no .decode and is left unchanged.
            astr = astr.decode('ISO-8859-2')
        except AttributeError:
            pass
        fnamefobject.close()
        nocom = removecomment(astr, '!')
        idfst = nocom
        # alist = string.split(idfst, ';')
        alist = idfst.split(';')
        lss = []
        for element in alist:
            # lst = string.split(element, ',')
            lst = element.split(',')
            lss.append(lst)
        for i in range(0, len(lss)):
            for j in range(0, len(lss[i])):
                lss[i][j] = lss[i][j].strip()
        for element in lss:
            node = element[0].upper()
            if node in dt:
                #stuff data in this key
                dt[node.upper()].append(element)
            else:
                #scream
                if node == '':
                    continue
                print('this node -%s-is not present in base dictionary'%(node))
        self.dt, self.dtls = dt, dtls
        return dt, dtls
    def replacenode(self, othereplus, node):
        """replace the node here with the node from othereplus"""
        node = node.upper()
        self.dt[node.upper()] = othereplus.dt[node.upper()]
    def add2node(self, othereplus, node):
        """add the node here with the node from othereplus
        this will potentially have duplicates"""
        node = node.upper()
        self.dt[node.upper()] = self.dt[node.upper()] + othereplus.dt[node.upper()]
    def addinnode(self, otherplus, node, objectname):
        """add an item to the node.
        example: add a new zone to the element 'ZONE'

        NOTE(review): incomplete — ``newelement`` is computed but never
        inserted, so this method currently has no effect."""
        # do a test for unique object here
        newelement = otherplus.dt[node.upper()]
    def getrefs(self, reflist):
        """
        reflist is got from getobjectref in parse_idd.py
        getobjectref returns a dictionary.
        reflist is an item in the dictionary
        getrefs gathers all the fields refered by reflist
        """
        alist = []
        for element in reflist:
            if element[0].upper() in self.dt:
                for elm in self.dt[element[0].upper()]:
                    alist.append(elm[element[1]])
        return alist
#------------------------------------------
| |
import sublime, sublime_plugin
import os.path
# Normal: Motions apply to all the characters they select
MOTION_MODE_NORMAL = 0
# Used in visual line mode: Motions are extended to BOL and EOL.
MOTION_MODE_LINE = 2
# Registers are used for clipboards and macro storage
g_registers = {}
# Represents the current input state. The primary commands that interact with
# this are:
# * set_action
# * set_motion
# * push_repeat_digit
class InputState:
    """Shared container for the partially-entered vi command.

    The attributes live at class level on purpose: a single instance
    (``g_input_state``) is created below and mutated in place by the
    commands in this module.
    """
    prefix_repeat_digits = []   # digits typed before the action, e.g. '4' in 4dw
    action_command = None       # pending action command name
    action_command_args = None
    action_description = None   # human-readable label for the status bar
    motion_repeat_digits = []   # digits typed after the action, e.g. '4' in d4w
    motion_command = None
    motion_command_args = None
    motion_mode = MOTION_MODE_NORMAL
    motion_mode_overridden = False
    motion_inclusive = False
    motion_clip_to_line = False
    register = None             # register name selected with '"'
g_input_state = InputState()
# Updates the status bar to reflect the current mode and input state
def update_status_line(view):
    """Render the current mode and pending input state in the status bar."""
    if not view.settings().get('command_mode'):
        view.set_status('mode', 'INSERT MODE')
        return
    if g_input_state.motion_mode == MOTION_MODE_LINE:
        parts = ['VISUAL LINE MODE']
    elif view.has_non_empty_selection_region():
        parts = ['VISUAL MODE']
    else:
        parts = ['COMMAND MODE']
    if g_input_state.action_command is not None:
        parts.append(g_input_state.action_description
                     or g_input_state.action_command)
    repeat = (digits_to_number(g_input_state.prefix_repeat_digits)
              * digits_to_number(g_input_state.motion_repeat_digits))
    if repeat != 1:
        if g_input_state.action_command is not None:
            parts[-1] += " * " + str(repeat)
        else:
            parts.append("* " + str(repeat))
    if g_input_state.register is not None:
        parts.insert(1, 'Register "' + g_input_state.register + '"')
    view.set_status('mode', ' - '.join(parts))
def set_motion_mode(view, mode):
    """Set the shared motion mode and refresh the status bar."""
    g_input_state.motion_mode = mode
    update_status_line(view)
def reset_input_state(view, reset_motion_mode = True):
    """Clear all pending command state; optionally reset the motion mode."""
    global g_input_state
    # Fresh lists are built on every call, so no state is shared.
    defaults = (
        ('prefix_repeat_digits', []),
        ('action_command', None),
        ('action_command_args', None),
        ('action_description', None),
        ('motion_repeat_digits', []),
        ('motion_command', None),
        ('motion_mode_overridden', False),
        ('motion_command_args', None),
        ('motion_inclusive', False),
        ('motion_clip_to_line', False),
        ('register', None),
    )
    for attr, value in defaults:
        setattr(g_input_state, attr, value)
    if reset_motion_mode:
        set_motion_mode(view, MOTION_MODE_NORMAL)
class ViCancelCurrentAction(sublime_plugin.TextCommand):
    # NOTE(review): the parameters mirror SetAction.run's signature but are
    # ignored here; the command simply discards any pending input.
    def run(self, action, action_args = {}, motion_mode = None, description = None):
        """Abort the partially-entered command and reset all input state."""
        reset_input_state(self.view, True)
def string_to_motion_mode(mode):
    """Map a mode name to its MOTION_MODE_* constant, or -1 if unknown."""
    known_modes = {
        'normal': MOTION_MODE_NORMAL,
        'line': MOTION_MODE_LINE,
    }
    return known_modes.get(mode, -1)
# Called when the plugin is unloaded (e.g., perhaps it just got added to
# ignored_packages). Ensure files aren't left in command mode.
def plugin_unloaded():
    """Leave no view stuck in command mode when the plugin goes away."""
    for window in sublime.windows():
        for view in window.views():
            settings = view.settings()
            settings.set('command_mode', False)
            settings.set('inverse_caret_state', False)
            view.erase_status('mode')
def plugin_loaded():
    """Put configured views into command mode when the plugin loads."""
    for window in sublime.windows():
        for view in window.views():
            if not view.settings().get("vintage_start_in_command_mode"):
                continue
            view.settings().set('command_mode', True)
            view.settings().set('inverse_caret_state', True)
            update_status_line(view)
# Ensures the input state is reset when the view changes, or the user selects
# with the mouse or non-vintage key bindings
class InputStateTracker(sublime_plugin.EventListener):
    """Resets pending input when the view or selection changes, and answers
    the ``vi_*`` key-binding context queries against ``g_input_state``.

    ``on_query_context`` returns True/False when it handles a key and None
    to let other context handlers answer.
    """
    def on_activated(self, view):
        reset_input_state(view)
    def on_deactivated(self, view):
        reset_input_state(view)
        # Ensure that insert mode actions will no longer be grouped, otherwise
        # it can lead to the impression that too much is undone at once
        view.run_command('unmark_undo_groups_for_gluing')
    def on_post_save(self, view):
        # Ensure that insert mode actions will no longer be grouped, so it's
        # always possible to undo back to the last saved state
        view.run_command('unmark_undo_groups_for_gluing')
    def on_selection_modified(self, view):
        reset_input_state(view, False)
        # Get out of visual line mode if the selection has changed, e.g., due
        # to clicking with the mouse
        if (g_input_state.motion_mode == MOTION_MODE_LINE and
            not view.has_non_empty_selection_region()):
            g_input_state.motion_mode = MOTION_MODE_NORMAL
            update_status_line(view)
    def on_load(self, view):
        if view.settings().get("vintage_start_in_command_mode"):
            view.run_command('exit_insert_mode')
    def on_new(self, view):
        self.on_load(view)
    def on_clone(self, view):
        self.on_load(view)
    def on_query_context(self, view, key, operator, operand, match_all):
        # Note: "vi_action" falls through to None when no action is pending,
        # so the binding is only considered while an action is in flight.
        if key == "vi_action" and g_input_state.action_command:
            if operator == sublime.OP_EQUAL:
                return operand == g_input_state.action_command
            if operator == sublime.OP_NOT_EQUAL:
                return operand != g_input_state.action_command
        elif key == "vi_has_action":
            v = g_input_state.action_command is not None
            if operator == sublime.OP_EQUAL: return v == operand
            if operator == sublime.OP_NOT_EQUAL: return v != operand
        elif key == "vi_has_register":
            r = g_input_state.register is not None
            if operator == sublime.OP_EQUAL: return r == operand
            if operator == sublime.OP_NOT_EQUAL: return r != operand
        elif key == "vi_motion_mode":
            m = string_to_motion_mode(operand)
            if operator == sublime.OP_EQUAL:
                return m == g_input_state.motion_mode
            if operator == sublime.OP_NOT_EQUAL:
                return m != g_input_state.motion_mode
        elif key == "vi_has_repeat_digit":
            # Digits go to the motion buffer once an action is pending.
            if g_input_state.action_command:
                v = len(g_input_state.motion_repeat_digits) > 0
            else:
                v = len(g_input_state.prefix_repeat_digits) > 0
            if operator == sublime.OP_EQUAL: return v == operand
            if operator == sublime.OP_NOT_EQUAL: return v != operand
        elif key == "vi_has_input_state":
            v = (len(g_input_state.motion_repeat_digits) > 0 or
                len(g_input_state.prefix_repeat_digits) > 0 or
                g_input_state.action_command is not None or
                g_input_state.register is not None)
            if operator == sublime.OP_EQUAL: return v == operand
            if operator == sublime.OP_NOT_EQUAL: return v != operand
        elif key == "vi_can_enter_text_object":
            v = (g_input_state.action_command is not None) or view.has_non_empty_selection_region()
            if operator == sublime.OP_EQUAL: return v == operand
            if operator == sublime.OP_NOT_EQUAL: return v != operand
        return None
# Called when g_input_state represents a fully formed command. Generates a
# call to vi_eval, which is what will be left on the undo/redo stack.
def eval_input(view):
    """Package g_input_state into a vi_eval call, then reset the state.

    vi_eval is what actually lands on the undo/redo stack.
    """
    global g_input_state
    state = g_input_state
    cmd_args = {
        'action_command': state.action_command,
        'action_args': state.action_command_args,
        'motion_command': state.motion_command,
        'motion_args': state.motion_command_args,
        'motion_mode': state.motion_mode,
        'motion_inclusive': state.motion_inclusive,
        'motion_clip_to_line': state.motion_clip_to_line,
    }
    if state.prefix_repeat_digits:
        cmd_args['prefix_repeat'] = digits_to_number(state.prefix_repeat_digits)
    if state.motion_repeat_digits:
        cmd_args['motion_repeat'] = digits_to_number(state.motion_repeat_digits)
    if state.register is not None:
        if not cmd_args['action_args']:
            cmd_args['action_args'] = {}
        cmd_args['action_args']['register'] = state.register
    # Keep the motion mode (e.g. visual line) unless an action completed.
    reset_motion_mode = state.action_command is not None
    reset_input_state(view, reset_motion_mode)
    view.run_command('vi_eval', cmd_args)
# Adds a repeat digit to the input state.
# Repeat digits may come before the action, after the action, or both. For
# example:
# 4dw
# d4w
# 2d2w
# These commands will all delete 4 words.
class PushRepeatDigit(sublime_plugin.TextCommand):
    """Append a repeat digit to the appropriate buffer.

    Digits typed before an action (the 4 in 4dw) go to the prefix buffer;
    digits typed after an action (the 4 in d4w) go to the motion buffer.
    """
    def run(self, edit, digit):
        global g_input_state
        if g_input_state.action_command:
            buffer = g_input_state.motion_repeat_digits
        else:
            buffer = g_input_state.prefix_repeat_digits
        buffer.append(digit)
        update_status_line(self.view)
# Set the current action in the input state. Note that this won't create an
# entry on the undo stack: only eval_input does this.
class SetAction(sublime_plugin.TextCommand):
    """Record the pending action in the input state.

    This does not create an undo-stack entry: only eval_input() does.
    """
    # Custom version of run_, so an edit object isn't created. This allows
    # eval_input() to add the desired command to the undo stack
    def run_(self, edit_token, args):
        if 'event' in args:
            del args['event']
        return self.run(**args)

    def run(self, action, action_args = None, description = None):
        """Store the action; in visual mode evaluate immediately, otherwise
        wait for the following motion."""
        global g_input_state
        g_input_state.action_command = action
        # None-default instead of a mutable {} default: avoids sharing one
        # dict object across every invocation that omits action_args.
        g_input_state.action_command_args = (
            action_args if action_args is not None else {})
        g_input_state.action_description = description
        if self.view.has_non_empty_selection_region():
            # Currently in visual mode, so no following motion is expected:
            # eval the current input
            eval_input(self.view)
        else:
            update_status_line(self.view)
def digits_to_number(digits):
    """Convert a sequence of digits to an int; an empty sequence means 1."""
    if not digits:
        return 1
    value = 0
    for digit in digits:
        value = value * 10 + int(digit)
    return value
# Set the current motion in the input state. Note that this won't create an
# entry on the undo stack: only eval_input does this.
class SetMotion(sublime_plugin.TextCommand):
    """Record the pending motion and evaluate the combined input.

    This does not create an undo-stack entry: only eval_input() does.
    """
    # Custom version of run_, so an edit object isn't created. This allows
    # eval_input() to add the desired command to the undo stack
    def run_(self, edit_token, args):
        return self.run(**args)

    def run(self, motion, motion_args = None, linewise = False, inclusive = False,
            clip_to_line = False, character = None, mode = None):
        global g_input_state
        # BUGFIX: the old mutable default (motion_args={}) was mutated below
        # via motion_args['character'] = ..., so a character from one call
        # leaked into every later call relying on the default.  Work on a
        # fresh dict instead.
        motion_args = dict(motion_args) if motion_args else {}
        # Pass the character, if any, onto the motion command.
        # This is required for 'f', 't', etc
        if character is not None:
            motion_args['character'] = character
        g_input_state.motion_command = motion
        g_input_state.motion_command_args = motion_args
        g_input_state.motion_inclusive = inclusive
        g_input_state.motion_clip_to_line = clip_to_line
        # Linewise motions put a pending action into line mode, unless the
        # user already overrode the mode explicitly (e.g. 'dvj').
        if not g_input_state.motion_mode_overridden \
                and g_input_state.action_command \
                and linewise:
            g_input_state.motion_mode = MOTION_MODE_LINE
        if mode is not None:
            m = string_to_motion_mode(mode)
            if m != -1:
                set_motion_mode(self.view, m)
            else:
                print("invalid motion mode:", mode)
        eval_input(self.view)
# Run a single, combined action and motion. Examples are 'D' (delete to EOL)
# and 'C' (change to EOL).
class SetActionMotion(sublime_plugin.TextCommand):
    """Run a single, combined action and motion. Examples are 'D' (delete
    to EOL) and 'C' (change to EOL)."""
    # Custom version of run_, so an edit object isn't created. This allows
    # eval_input() to add the desired command to the undo stack
    def run_(self, edit_token, args):
        return self.run(**args)

    def run(self, motion, action, motion_args = None, motion_clip_to_line = False,
            motion_inclusive = False, motion_linewise = False, action_args = None):
        global g_input_state
        g_input_state.motion_command = motion
        # None-defaults instead of mutable {} defaults: avoids sharing one
        # dict object across every invocation that omits the argument.
        g_input_state.motion_command_args = (
            motion_args if motion_args is not None else {})
        g_input_state.motion_inclusive = motion_inclusive
        g_input_state.motion_clip_to_line = motion_clip_to_line
        g_input_state.action_command = action
        g_input_state.action_command_args = (
            action_args if action_args is not None else {})
        if motion_linewise:
            g_input_state.motion_mode = MOTION_MODE_LINE
        eval_input(self.view)
# Update the current motion mode. e.g., 'dvj'
class SetMotionMode(sublime_plugin.TextCommand):
    """Explicitly override the motion mode, e.g. the 'v' in 'dvj'."""
    def run_(self, edit_token, args):
        args.pop('event', None)
        return self.run(**args)

    def run(self, mode):
        global g_input_state
        parsed = string_to_motion_mode(mode)
        if parsed == -1:
            print("invalid motion mode")
            return
        set_motion_mode(self.view, parsed)
        g_input_state.motion_mode_overridden = True
class SetRegister(sublime_plugin.TextCommand):
    """Select the register (clipboard/macro slot) for the next action."""
    def run_(self, edit_token, args):
        return self.run(**args)

    def run(self, character):
        g_input_state.register = character
        update_status_line(self.view)
def clip_point_to_line(view, f, pt):
    """Apply motion ``f`` to ``pt``, clamping the result to pt's line."""
    line = view.line(pt)
    if line.a == line.b:
        # Empty line: its start is the only valid position.
        return line.a
    moved = f(pt)
    return min(max(moved, line.a), line.b)
def transform_selection(view, f, extend = False, clip_to_line = False):
    """Move each selection's b end through ``f``, clamped to the buffer.

    With ``extend`` the a end is kept (selection grows); otherwise the
    selection collapses to the new point.
    """
    sel = view.sel()
    size = view.size()
    replacements = []
    for r in sel:
        if clip_to_line:
            pt = clip_point_to_line(view, f, r.b)
        else:
            pt = f(r.b)
        pt = max(0, min(pt, size))
        if extend:
            replacements.append(sublime.Region(r.a, pt))
        else:
            replacements.append(sublime.Region(pt))
    sel.clear()
    for region in replacements:
        sel.add(region)
def transform_selection_regions(view, f):
    """Replace every selected region with f(region), dropping None results."""
    sel = view.sel()
    kept = [result for result in (f(r) for r in sel) if result is not None]
    sel.clear()
    for region in kept:
        sel.add(region)
def expand_to_full_line(view, ignore_trailing_newline = True):
    """Expand every selection to whole lines, preserving its direction.

    With ``ignore_trailing_newline`` a selection ending exactly at a line
    start is not extended down into that line.
    """
    new_sel = []
    for s in view.sel():
        if s.a == s.b:
            # Empty selection: just take its full line.
            new_sel.append(view.full_line(s.a))
        else:
            la = view.full_line(s.begin())
            lb = view.full_line(s.end())
            a = la.a
            if ignore_trailing_newline and s.end() == lb.a:
                # s.end() is already at EOL, don't go down to the next line
                b = s.end()
            else:
                b = lb.b
            # Keep the original direction (a < b vs. reversed).
            if s.a < s.b:
                new_sel.append(sublime.Region(a, b, 0))
            else:
                new_sel.append(sublime.Region(b, a, 0))
    view.sel().clear()
    for s in new_sel:
        view.sel().add(s)
def orient_single_line_region(view, forward, r):
    """Orient ``r`` in the given direction if it spans exactly one full line."""
    full = view.full_line(r.begin())
    if full.a != r.begin() or full.end() != r.end():
        return r
    if forward:
        return full
    return sublime.Region(full.b, full.a)
def set_single_line_selection_direction(view, forward):
    """Give every single-line selection the direction ``forward``."""
    def orient(region):
        return orient_single_line_region(view, forward, region)
    transform_selection_regions(view, orient)
def orient_single_character_region(view, forward, r):
    """Orient ``r`` in the given direction if it is exactly one char wide."""
    if r.begin() + 1 != r.end():
        return r
    if forward:
        return sublime.Region(r.begin(), r.end())
    return sublime.Region(r.end(), r.begin())
def set_single_character_selection_direction(view, forward):
    """Give every single-character selection the direction ``forward``."""
    def orient(region):
        return orient_single_character_region(view, forward, region)
    transform_selection_regions(view, orient)
def clip_empty_selection_to_line_contents(view):
    """Pull empty carets sitting past EOL back onto the last character."""
    adjusted = []
    for s in view.sel():
        if s.empty():
            line = view.line(s.b)
            if s.b == line.b and not line.empty():
                s = sublime.Region(line.b - 1, line.b - 1, s.xpos)
        adjusted.append(s)
    view.sel().clear()
    for s in adjusted:
        view.sel().add(s)
def shrink_inclusive(r):
    """Collapse ``r`` to the caret position of an inclusive motion."""
    caret = r.b - 1 if r.a < r.b else r.b
    return sublime.Region(caret, caret, r.xpos)
def shrink_exclusive(r):
    """Collapse a region to an empty one at its b end, keeping xpos."""
    return sublime.Region(r.b, r.b, r.xpos)
def shrink_to_first_char(r):
    """Collapse ``r`` to its first character."""
    if r.b < r.a:
        # If the Region is reversed, the first char is the character *before*
        # the first bound.
        return sublime.Region(r.a - 1)
    return sublime.Region(r.a)
# This is the core: it takes a motion command, action command, and repeat
# counts, and runs them all.
#
# Note that this doesn't touch g_input_state, and doesn't maintain any state
# other than what's passed on its arguments. This allows it to operate correctly
# in macros, and when running via repeat.
class ViEval(sublime_plugin.TextCommand):
    """Core dispatcher: run a motion command, an action command, and
    their repeat counts together as one unit.

    Does not touch g_input_state and keeps no state beyond its
    arguments, so it works correctly inside macros and via repeat.
    """
    def run_(self, edit_token, args):
        # Remember whether we start in visual mode, so we can detect an
        # action-triggered exit from it after run() completes.
        was_visual = self.view.has_non_empty_selection_region()
        edit = self.view.begin_edit(edit_token, self.name(), args)
        try:
            self.run(edit, **args)
        finally:
            self.view.end_edit(edit)
        # Glue the marked undo groups if visual mode was exited (e.g., by
        # running an action while in visual mode). This ensures that
        # v+motions+action can be repeated as a single unit.
        if self.view.settings().get('command_mode') == True:
            is_visual = self.view.has_non_empty_selection_region()
            if was_visual and not is_visual:
                self.view.run_command('glue_marked_undo_groups')
            elif not is_visual:
                self.view.run_command('unmark_undo_groups_for_gluing')
    def run(self, edit, action_command, action_args,
            motion_command, motion_args, motion_mode,
            motion_inclusive, motion_clip_to_line,
            prefix_repeat = None, motion_repeat = None):
        """Run *motion_command* (extending the selection), then apply
        *action_command* to the result, honoring vi repeat semantics.

        prefix_repeat is the count typed before the operator (e.g. 2 in
        2dw); motion_repeat the count typed before the motion.
        """
        explicit_repeat = (prefix_repeat is not None or motion_repeat is not None)
        if prefix_repeat is None:
            prefix_repeat = 1
        if motion_repeat is None:
            motion_repeat = 1
        # Arguments are always passed as floats (thanks to JSON encoding),
        # convert them back to integers
        prefix_repeat = int(prefix_repeat)
        motion_repeat = int(motion_repeat)
        motion_mode = int(motion_mode)
        # Combine the prefix_repeat and motion_repeat into motion_repeat, to
        # allow commands like 2yy to work by first doing the motion twice,
        # then operating once
        if motion_command and prefix_repeat > 1:
            motion_repeat *= prefix_repeat
            prefix_repeat = 1
        # Check if the motion command would like to handle the repeat itself
        if motion_args and 'repeat' in motion_args:
            motion_args['repeat'] = motion_repeat * prefix_repeat
            motion_repeat = 1
            prefix_repeat = 1
        # Some commands behave differently if a repeat is given. e.g., 1G goes
        # to line one, but G without a repeat goes to EOF. Let the command
        # know if a repeat was specified.
        if motion_args and 'explicit_repeat' in motion_args:
            motion_args['explicit_repeat'] = explicit_repeat
        visual_mode = self.view.has_non_empty_selection_region()
        # Let the motion know if we're in visual mode, if it wants to know
        if motion_args and 'visual' in motion_args:
            motion_args['visual'] = visual_mode
        for i in range(prefix_repeat):
            # Run the motion command, extending the selection to the range of
            # characters covered by the motion
            if motion_command:
                direction = 0
                if motion_args and 'forward' in motion_args:
                    forward = motion_args['forward']
                    if forward:
                        direction = 1
                    else:
                        direction = -1
                for j in range(motion_repeat):
                    if direction != 0 and motion_mode == MOTION_MODE_LINE:
                        # Ensure selections encompassing a single line are
                        # oriented in the same way as the motion, so they'll
                        # remain selected. This is needed so that Vk will work
                        # as expected
                        set_single_line_selection_direction(self.view, direction == 1)
                    elif direction != 0:
                        set_single_character_selection_direction(self.view, direction == 1)
                    if motion_mode == MOTION_MODE_LINE:
                        # Don't do either of the below things: this is
                        # important so that Vk on an empty line would select
                        # the following line.
                        pass
                    elif direction == 1 and motion_inclusive:
                        # Expand empty selections include the character
                        # they're on, and to start from the RHS of the
                        # character
                        transform_selection_regions(self.view,
                            lambda r: sublime.Region(r.b, r.b + 1, r.xpos) if r.empty() else r)
                    self.view.run_command(motion_command, motion_args)
            # If the motion needs to be clipped to the line, remove any
            # trailing newlines from the selection. For example, with the
            # caret at the start of the last word on the line, 'dw' should
            # delete the word, but not the newline, while 'w' should advance
            # the caret to the first character of the next line.
            if motion_mode != MOTION_MODE_LINE and action_command and motion_clip_to_line:
                transform_selection_regions(self.view, lambda r: self.view.split_by_newlines(r)[0])
            reindent = False
            if motion_mode == MOTION_MODE_LINE:
                expand_to_full_line(self.view, visual_mode)
                if action_command == "enter_insert_mode":
                    # When lines are deleted before entering insert mode, the
                    # cursor should be left on an empty line. Leave the trailing
                    # newline out of the selection to allow for this.
                    transform_selection_regions(self.view,
                        lambda r: (sublime.Region(r.begin(), r.end() - 1)
                            if not r.empty() and self.view.substr(r.end() - 1) == "\n"
                            else r))
                    reindent = True
            if action_command:
                # Apply the action to the selection
                self.view.run_command(action_command, action_args)
                if reindent and self.view.settings().get('auto_indent'):
                    self.view.run_command('reindent', {'force_indent': False})
        if not visual_mode:
            # Shrink the selection down to a point
            if motion_inclusive:
                transform_selection_regions(self.view, shrink_inclusive)
            else:
                transform_selection_regions(self.view, shrink_exclusive)
        # Clip the selections to the line contents
        if self.view.settings().get('command_mode'):
            clip_empty_selection_to_line_contents(self.view)
        # Ensure the selection is visible
        self.view.show(self.view.sel())
class EnterInsertMode(sublime_plugin.TextCommand):
    """Switch the view into insert mode, optionally running an
    insertion command first (e.g. for 'o', 'a', 'cw')."""
    # Ensure no undo group is created: the only entry on the undo stack should
    # be the insert_command, if any
    def run_(self, edit_token, args):
        if args:
            return self.run(**args)
        else:
            return self.run()
    def run(self, insert_command = None, insert_args = None, register = '"'):
        """Enter insert mode.

        :param insert_command: optional command to run before switching.
        :param insert_args: arguments for that command (None means {};
            a None default replaces the original mutable-default dict).
        :param register: vi register name forwarded to the command.
        """
        # mark_undo_groups_for_gluing allows all commands run while in insert
        # mode to comprise a single undo group, which is important for '.' to
        # work as desired.
        self.view.run_command('maybe_mark_undo_groups_for_gluing')
        if insert_command:
            # Copy the caller's args before adding the register so the
            # caller's dict is never mutated.
            args = dict(insert_args or {})
            args['register'] = register
            self.view.run_command(insert_command, args)
        self.view.settings().set('command_mode', False)
        self.view.settings().set('inverse_caret_state', False)
        update_status_line(self.view)
class ExitInsertMode(sublime_plugin.TextCommand):
    """Return to command mode from insert mode (the Esc handler)."""
    def run_(self, edit_token, args):
        edit = self.view.begin_edit(edit_token, self.name(), args)
        try:
            self.run(edit)
        finally:
            self.view.end_edit(edit)
        # Call after end_edit(), to ensure the final entry in the glued undo
        # group is 'exit_insert_mode'.
        self.view.run_command('glue_marked_undo_groups')
    def run(self, edit):
        self.view.settings().set('command_mode', True)
        self.view.settings().set('inverse_caret_state', True)
        # Match vi: leaving insert mode moves the caret one character left
        # (only when there is no active selection).
        if not self.view.has_non_empty_selection_region():
            self.view.run_command('vi_move_by_characters_in_line', {'forward': False})
        update_status_line(self.view)
class EnterVisualMode(sublime_plugin.TextCommand):
    """Enter character-wise visual mode, expanding each caret to cover
    the character under it."""
    def run(self, edit):
        self.view.run_command('mark_undo_groups_for_gluing')
        if g_input_state.motion_mode != MOTION_MODE_NORMAL:
            set_motion_mode(self.view, MOTION_MODE_NORMAL)
        def cover_char(region):
            if region.empty():
                return sublime.Region(region.b, region.b + 1)
            return region
        transform_selection_regions(self.view, cover_char)
class ExitVisualMode(sublime_plugin.TextCommand):
    """Leave visual mode, collapsing selections back to carets.

    With toggle=True, visual-line mode drops back to character-wise
    visual mode instead of exiting entirely.
    """
    def run(self, edit, toggle = False):
        if not toggle:
            set_motion_mode(self.view, MOTION_MODE_NORMAL)
            self.view.run_command('shrink_selections')
        elif g_input_state.motion_mode != MOTION_MODE_NORMAL:
            # Toggling out of visual-line: back to normal motion mode,
            # but keep the selection.
            set_motion_mode(self.view, MOTION_MODE_NORMAL)
        else:
            self.view.run_command('shrink_selections')
        self.view.run_command('unmark_undo_groups_for_gluing')
class EnterVisualLineMode(sublime_plugin.TextCommand):
    """Enter line-wise visual mode ('V'), expanding selections to full
    lines."""
    def run(self, edit):
        set_motion_mode(self.view, MOTION_MODE_LINE)
        expand_to_full_line(self.view)
        self.view.run_command('maybe_mark_undo_groups_for_gluing')
class ShrinkSelections(sublime_plugin.TextCommand):
    """Collapse every non-empty selection to a caret at its b end;
    forward-pointing regions land on their final character."""
    def shrink(self, r):
        if r.empty():
            return r
        pt = r.b - 1 if r.a < r.b else r.b
        return sublime.Region(pt)
    def run(self, edit):
        transform_selection_regions(self.view, self.shrink)
class ShrinkSelectionsToBeginning(sublime_plugin.TextCommand):
    """Collapse each selection to a caret at its beginning."""
    def shrink(self, r):
        # begin() is direction-agnostic: min(a, b).
        return sublime.Region(r.begin())
    def run(self, edit, register = '"'):
        transform_selection_regions(self.view, self.shrink)
class ShrinkSelectionsToEnd(sublime_plugin.TextCommand):
    """Collapse each selection to a caret at its end, stopping just
    before a trailing newline for linewise selections."""
    def shrink(self, r):
        end = r.end()
        if self.view.substr(end - 1) != u'\n':
            return sublime.Region(end)
        # For linewise selections put the cursor *before* the line break
        return sublime.Region(end - 1)
    def run(self, edit, register = '"'):
        transform_selection_regions(self.view, self.shrink)
class VisualUpperCase(sublime_plugin.TextCommand):
    """Uppercase the visual selection (vi 'U'), then leave visual mode."""
    def run(self, edit):
        self.view.run_command("upper_case")
        self.view.run_command("exit_visual_mode")
class VisualLowerCase(sublime_plugin.TextCommand):
    """Lowercase the visual selection (vi 'u'), then leave visual mode."""
    def run(self, edit):
        self.view.run_command("lower_case")
        self.view.run_command("exit_visual_mode")
# Sequence is used as part of glue_marked_undo_groups: the marked undo groups
# are rewritten into a single sequence command, that accepts all the previous
# commands
class Sequence(sublime_plugin.TextCommand):
    """Replay a list of (command, args) pairs as one command; used by
    glue_marked_undo_groups to rewrite grouped commands."""
    def run(self, edit, commands):
        for cmd, args in commands:
            self.view.run_command(cmd, args)
class ViDelete(sublime_plugin.TextCommand):
    """Delete the current selection, first saving its text into the
    chosen register and the numbered register '1'."""
    def run(self, edit, register = '"'):
        if not self.view.has_non_empty_selection_region():
            return
        for reg in (register, '1'):
            set_register(self.view, reg, forward=False)
        self.view.run_command('left_delete')
class ViLeftDelete(sublime_plugin.TextCommand):
    """Delete the character before the cursor (vi 'X'), saving it to the
    registers and keeping the caret on the line contents."""
    def run(self, edit, register = '"'):
        for reg in (register, '1'):
            set_register(self.view, reg, forward=False)
        self.view.run_command('left_delete')
        clip_empty_selection_to_line_contents(self.view)
class ViRightDelete(sublime_plugin.TextCommand):
    """Delete the character under the cursor (vi 'x'), saving it to the
    registers and keeping the caret on the line contents."""
    def run(self, edit, register = '"'):
        for reg in (register, '1'):
            set_register(self.view, reg, forward=True)
        self.view.run_command('right_delete')
        clip_empty_selection_to_line_contents(self.view)
class ViCopy(sublime_plugin.TextCommand):
    """Yank the selection into the chosen register and the yank
    register '0', then collapse onto the first character (vi 'y')."""
    def run(self, edit, register = '"'):
        for reg in (register, '0'):
            set_register(self.view, reg, forward=True)
        transform_selection_regions(self.view, shrink_to_first_char)
class ViPrefixableCommand(sublime_plugin.TextCommand):
    """Base for commands that accept vi count/register prefixes."""
    # Ensure register and repeat are picked up from g_input_state, and that
    # it'll be recorded on the undo stack
    def run_(self, edit_token, args):
        if not args:
            args = {}
        # Consume (and reset) any pending register from the input state.
        if g_input_state.register:
            args['register'] = g_input_state.register
            g_input_state.register = None
        # Consume any pending repeat-count digits.
        if g_input_state.prefix_repeat_digits:
            args['repeat'] = digits_to_number(g_input_state.prefix_repeat_digits)
            g_input_state.prefix_repeat_digits = []
        if 'event' in args:
            # Event payloads aren't run() parameters; drop them.
            del args['event']
        edit = self.view.begin_edit(edit_token, self.name(), args)
        try:
            return self.run(edit, **args)
        finally:
            self.view.end_edit(edit)
class ViPasteRight(ViPrefixableCommand):
    """Paste after the caret (vi 'p'); in visual mode the pasted text
    replaces the selection instead."""
    def advance(self, pt):
        # Never step past the end of the buffer or onto the next line.
        blocked = pt >= self.view.size() or self.view.substr(pt) == '\n'
        return pt if blocked else pt + 1
    def run(self, edit, register = '"', repeat = 1):
        visual_mode = self.view.has_non_empty_selection_region()
        if not visual_mode:
            transform_selection(self.view, self.advance)
        self.view.run_command('paste_from_register',
                              {'forward': not visual_mode,
                               'repeat': repeat,
                               'register': register})
class ViPasteLeft(ViPrefixableCommand):
    """Paste register contents before the caret (vi 'P')."""
    def run(self, edit, register = '"', repeat = 1):
        self.view.run_command('paste_from_register', {'forward': False,
                                                      'repeat': repeat,
                                                      'register': register})
def set_register(view, register, forward):
    """Store the text covered by the current selections into *register*.

    Empty selections grab one character in the given direction. An
    upper-case register name appends to the lower-case register instead
    of overwriting it. '%' (the file name) is read-only; '*' and '+'
    (and '"' when the 'vintage_use_clipboard' setting is on) write the
    system clipboard, which then also fills the unnamed register, as
    Vim does.

    Fix: the original also built a parallel `regions` list that was
    never used; that dead code is removed.
    """
    delta = 1 if forward else -1
    pieces = []
    for sel in view.sel():
        if sel.empty():
            # Grab the single character next to the caret.
            sel = sublime.Region(sel.a, sel.a + delta)
        pieces.append(view.substr(sel))
    text = '\n'.join(pieces)
    use_sys_clipboard = view.settings().get('vintage_use_clipboard', False) == True
    if (use_sys_clipboard and register == '"') or (register in ('*', '+')):
        sublime.set_clipboard(text)
        # If the system's clipboard is used, Vim always propagates the data to
        # the unnamed register too.
        register = '"'
    if register == '%':
        # '%' is the read-only file-name register; never write it.
        return
    reg = register.lower()
    # An upper-case register name means "append".
    append = (reg != register)
    if append and reg in g_registers:
        g_registers[reg] += text
    else:
        g_registers[reg] = text
def get_register(view, register):
    """Return the text held by *register*, or None if unset.

    '%' yields the file's base name; '*'/'+' (and '"' when the
    'vintage_use_clipboard' setting is on) read the system clipboard.
    """
    use_sys_clipboard = view.settings().get('vintage_use_clipboard', False) == True
    register = register.lower()
    if register == '%':
        file_name = view.file_name()
        return os.path.basename(file_name) if file_name else None
    if register in ('*', '+') or (use_sys_clipboard and register == '"'):
        return sublime.get_clipboard()
    return g_registers.get(register, None)
def has_register(register):
    """Return True if *register* currently holds a value; the special
    registers '%', '*' and '+' always do."""
    return register in ('%', '*', '+') or register in g_registers
class PasteFromRegisterCommand(sublime_plugin.TextCommand):
    """Paste register contents at/over the current selections.

    Text ending in a newline is treated as line-wise and inserted on a
    line of its own (before or after the caret's line); otherwise it is
    inserted character-wise, replacing any selected text.
    """
    def run(self, edit, register, repeat = 1, forward = True):
        text = get_register(self.view, register)
        if not text:
            # Fix: the original message lacked a separating space and
            # printed e.g. "Undefined registera".
            sublime.status_message("Undefined register " + register)
            return
        # repeat arrives as a float via JSON-encoded args.
        text = text * int(repeat)
        # Delete the selection first; this also fills the delete
        # registers, matching vi's behavior of 'p' in visual mode.
        self.view.run_command('vi_delete')
        regions = [r for r in self.view.sel()]
        new_sel = []
        # Track how far earlier insertions have shifted later regions.
        offset = 0
        for s in regions:
            s = sublime.Region(s.a + offset, s.b + offset)
            if len(text) > 0 and text[-1] == '\n':
                # paste line-wise
                if forward:
                    start = self.view.full_line(s.end()).b
                else:
                    start = self.view.line(s.begin()).a
                num = self.view.insert(edit, start, text)
                new_sel.append(start)
            else:
                # paste character-wise: insert, then erase what was selected
                num = self.view.insert(edit, s.begin(), text)
                self.view.erase(edit, sublime.Region(s.begin() + num,
                                                     s.end() + num))
                num -= s.size()
                new_sel.append(s.begin())
            offset += num
        self.view.sel().clear()
        for s in new_sel:
            self.view.sel().add(s)
    def is_enabled(self, register, repeat = 1, forward = True):
        # Grey the command out when the register has never been set.
        return has_register(register)
class ReplaceCharacter(sublime_plugin.TextCommand):
    """Implement vi's 'r': replace the character(s) under the cursor or
    selection with *character*."""
    def run(self, edit, character):
        new_sel = []
        created_new_line = False
        # Iterate in reverse so earlier replacements can't shift the
        # positions of selections not yet processed.
        for s in reversed(self.view.sel()):
            if s.empty():
                self.view.replace(edit, sublime.Region(s.b, s.b + 1), character)
                if character == "\n":
                    created_new_line = True
                    # selection should be in the first column of the newly
                    # created line
                    new_sel.append(sublime.Region(s.b + 1))
                else:
                    new_sel.append(s)
            else:
                # Vim replaces characters with unprintable ones when r<enter> is
                # pressed from visual mode. Let's not make a replacement in
                # that case.
                if character != '\n':
                    # Process lines contained in the selection individually.
                    # This way we preserve newline characters.
                    lines = self.view.split_by_newlines(s)
                    for line in lines:
                        self.view.replace(edit, line, character * line.size())
                new_sel.append(sublime.Region(s.begin()))
        self.view.sel().clear()
        for s in new_sel:
            self.view.sel().add(s)
        # 'r<enter>' should reindent the new line if auto_indent is on.
        if created_new_line and self.view.settings().get('auto_indent'):
            self.view.run_command('reindent', {'force_indent': False})
class CenterOnCursor(sublime_plugin.TextCommand):
    """Scroll so the first selection is vertically centered (vi 'zz')."""
    def run(self, edit):
        self.view.show_at_center(self.view.sel()[0])
class ScrollCursorLineToTop(sublime_plugin.TextCommand):
    """Scroll so the cursor's line sits at the top of the view (vi 'zt')."""
    def run(self, edit):
        # Jump the viewport to the very bottom first; show() then scrolls
        # back only as far as needed, leaving the cursor line at the top.
        self.view.set_viewport_position((self.view.viewport_position()[0], self.view.layout_extent()[1]))
        self.view.show(self.view.sel()[0], False)
class ScrollCursorLineToBottom(sublime_plugin.TextCommand):
    """Scroll so the cursor's line sits at the bottom of the view (vi 'zb')."""
    def run(self, edit):
        # Jump the viewport to the very top first; show() then scrolls
        # forward only as far as needed, leaving the cursor line at the bottom.
        self.view.set_viewport_position((self.view.viewport_position()[0], 0.0))
        self.view.show(self.view.sel()[0], False)
class ViScrollLines(ViPrefixableCommand):
    """Scroll view and caret by *repeat* lines, or by half a page when
    no count is given (vi CTRL-D / CTRL-U)."""
    def run(self, edit, forward = True, repeat = None):
        if repeat:
            line_delta = repeat * (1 if forward else -1)
        else:
            # Default to half a viewport of lines, like Vim.
            viewport_height = self.view.viewport_extent()[1]
            lines_per_page = viewport_height / self.view.line_height()
            line_delta = int(round(lines_per_page / (2 if forward else -2)))
        visual_mode = self.view.has_non_empty_selection_region()
        y_deltas = []
        def transform(pt):
            row = self.view.rowcol(pt)[0]
            new_pt = self.view.text_point(row + line_delta, 0)
            y_deltas.append(self.view.text_to_layout(new_pt)[1]
                            - self.view.text_to_layout(pt)[1])
            return new_pt
        transform_selection(self.view, transform, extend = visual_mode)
        self.view.run_command('vi_move_to_first_non_white_space_character',
            {'extend': visual_mode})
        # Vim scrolls the viewport as far as it moves the cursor. With multiple
        # selections the cursors could have moved different distances, due to
        # word wrapping. Move the viewport by the average of those distances.
        if not y_deltas:
            # Fix: with no selections the original divided by zero here.
            return
        avg_y_delta = sum(y_deltas) / len(y_deltas)
        vp = self.view.viewport_position()
        self.view.set_viewport_position((vp[0], vp[1] + avg_y_delta))
class ViIndent(sublime_plugin.TextCommand):
    """Indent the selected lines (vi '>'), then collapse the selection
    onto its first character as vi does."""
    def run(self, edit):
        self.view.run_command('indent')
        transform_selection_regions(self.view, shrink_to_first_char)
class ViUnindent(sublime_plugin.TextCommand):
    """Unindent the selected lines (vi '<'), then collapse the selection
    onto its first character as vi does."""
    def run(self, edit):
        self.view.run_command('unindent')
        transform_selection_regions(self.view, shrink_to_first_char)
class ViSetBookmark(sublime_plugin.TextCommand):
    """Record the current selections under a named bookmark (vi 'm')."""
    def run(self, edit, character):
        sublime.status_message("Set bookmark " + character)
        regions = list(self.view.sel())
        # Persistent + hidden: survives restarts without visual markers.
        self.view.add_regions("bookmark_" + character, regions, "", "",
                              sublime.PERSISTENT | sublime.HIDDEN)
class ViSelectBookmark(sublime_plugin.TextCommand):
    """Jump to a named bookmark; optionally land at the start of its line."""
    def run(self, edit, character, select_bol=False):
        key = "bookmark_" + character
        if not self.view.get_regions(key):
            return
        self.view.run_command('select_all_bookmarks', {'name': key})
        if not select_bol:
            return
        # ' (as opposed to `) jumps to the beginning of the line.
        saved = list(self.view.sel())
        self.view.sel().clear()
        for region in saved:
            bol = self.view.line(region.a).begin()
            self.view.sel().add(sublime.Region(bol, bol))
g_macro_target = None
class ViBeginRecordMacro(sublime_plugin.TextCommand):
    """Start recording a macro into register *character* (vi 'q')."""
    def run(self, edit, character):
        global g_macro_target
        # Remember the destination register until recording stops.
        g_macro_target = character
        self.view.run_command('start_record_macro')
class ViEndRecordMacro(sublime_plugin.TextCommand):
    """Stop macro recording and stash the recorded command list in the
    register chosen when recording began."""
    def run(self, edit):
        self.view.run_command('stop_record_macro')
        if not g_macro_target:
            return
        m = sublime.get_macro()
        # TODO: Convert the macro to a string before trying to store it in a
        # register
        g_registers[g_macro_target] = m
class ViReplayMacro(sublime_plugin.TextCommand):
    """Replay the macro stored in register *character* (vi '@'),
    honoring any pending repeat counts."""
    def run(self, edit, character):
        if character not in g_registers:
            return
        commands = g_registers[character]
        global g_input_state
        repetitions = 1
        # Multiply in any prefix and motion counts that were typed
        # before the replay command (only when non-zero, matching the
        # original truthiness checks).
        if len(g_input_state.prefix_repeat_digits) > 0:
            count = digits_to_number(g_input_state.prefix_repeat_digits)
            if count:
                repetitions *= count
        if len(g_input_state.motion_repeat_digits) > 0:
            count = digits_to_number(g_input_state.motion_repeat_digits)
            if count:
                repetitions *= count
        for _ in range(repetitions):
            for entry in commands:
                self.view.run_command(entry['command'], entry['args'])
class ShowAsciiInfo(sublime_plugin.TextCommand):
    """Show the character at the cursor with its decimal, hex and octal
    codes in the status bar (vi 'ga')."""
    def run(self, edit):
        c = self.view.substr(self.view.sel()[0].end())
        sublime.status_message("<%s> %d, Hex %s, Octal %s" %
            (c, ord(c), hex(ord(c))[2:], oct(ord(c))))
class ViReverseSelectionsDirection(sublime_plugin.TextCommand):
    """Swap the anchor and caret of every selection (visual-mode 'o')."""
    def run(self, edit):
        flipped = [sublime.Region(s.b, s.a) for s in self.view.sel()]
        self.view.sel().clear()
        for region in flipped:
            self.view.sel().add(region)
class MoveGroupFocus(sublime_plugin.WindowCommand):
    """Focus the neighboring layout group in the given direction
    ('up', 'down', 'left' or 'right')."""
    def run(self, direction):
        cells = self.window.get_layout()['cells']
        active_group = self.window.active_group()
        x1, y1, x2, y2 = cells[active_group]
        idxs = [i for i in range(len(cells)) if i != active_group]
        # Matches are any group that shares a border with the active group
        # in the specified direction and overlaps it along the other axis.
        if direction == "up":
            matches = [i for i in idxs if cells[i][3] == y1 and cells[i][0] < x2 and cells[i][2] > x1]
        elif direction == "down":
            matches = [i for i in idxs if cells[i][1] == y2 and cells[i][0] < x2 and cells[i][2] > x1]
        elif direction == "right":
            matches = [i for i in idxs if cells[i][0] == x2 and cells[i][1] < y2 and cells[i][3] > y1]
        elif direction == "left":
            matches = [i for i in idxs if cells[i][2] == x1 and cells[i][1] < y2 and cells[i][3] > y1]
        else:
            # Fix: an unknown direction left `matches` unbound and raised
            # NameError below; ignore it instead.
            return
        # Focus the first group found in the specified direction, if any.
        if matches:
            self.window.focus_group(matches[0])
| |
#!/usr/bin/env python
# Copyright (c) 2017 Cable Television Laboratories, Inc. and others.
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""Define the parent class of vping_ssh and vping_userdata testcases."""
from datetime import datetime
import logging
import time
import uuid
from snaps.config.flavor import FlavorConfig
from snaps.config.network import NetworkConfig, SubnetConfig
from snaps.config.router import RouterConfig
from snaps.openstack.create_flavor import OpenStackFlavor
from snaps.openstack.tests import openstack_tests
from snaps.openstack.utils import deploy_utils
from xtesting.core import testcase
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.utils import config
from functest.utils import env
class VPingBase(testcase.TestCase):
    """
    Base class for vPing tests that check connectivity between two VMs shared
    internal network.
    This class is responsible for creating the image, internal network.
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, **kwargs):
        # kwargs are forwarded to TestCase; 'os_creds' may carry pre-built
        # OpenStack credentials, otherwise they are looked up via snaps_utils.
        super(VPingBase, self).__init__(**kwargs)
        self.logger = logging.getLogger(__name__)
        self.os_creds = kwargs.get('os_creds') or snaps_utils.get_credentials()
        # Every SNAPS creator is appended here; clean() tears them down in
        # reverse creation order.
        self.creators = list()
        self.image_creator = None
        self.network_creator = None
        self.vm1_creator = None
        self.vm2_creator = None
        self.router_creator = None
        # Shared metadata
        # Unique suffix so parallel runs don't collide on resource names.
        self.guid = '-' + str(uuid.uuid4())
        self.router_name = getattr(
            config.CONF, 'vping_router_name') + self.guid
        self.vm1_name = getattr(
            config.CONF, 'vping_vm_name_1') + self.guid
        self.vm2_name = getattr(config.CONF, 'vping_vm_name_2') + self.guid
        # Timeouts (from config) used by subclasses for their wait loops.
        self.vm_boot_timeout = getattr(config.CONF, 'vping_vm_boot_timeout')
        self.vm_delete_timeout = getattr(
            config.CONF, 'vping_vm_delete_timeout')
        self.vm_ssh_connect_timeout = getattr(
            config.CONF, 'vping_vm_ssh_connect_timeout')
        self.ping_timeout = getattr(config.CONF, 'vping_ping_timeout')
        self.flavor_name = 'vping-flavor' + self.guid
        # Move this configuration option up for all tests to leverage
        if hasattr(config.CONF, 'snaps_images_cirros'):
            self.cirros_image_config = getattr(
                config.CONF, 'snaps_images_cirros')
        else:
            self.cirros_image_config = None
    def run(self, **kwargs):  # pylint: disable=too-many-locals
        """
        Begins the test execution which should originate from the subclass.

        Creates, in order: the Cirros image, the private network/subnet,
        a router to the external network, and the test flavor. Each
        creator is recorded in self.creators for later cleanup.
        """
        self.logger.info('Begin virtual environment setup')
        self.start_time = time.time()
        self.logger.info(
            "vPing Start Time:'%s'",
            datetime.fromtimestamp(self.start_time).strftime(
                '%Y-%m-%d %H:%M:%S'))
        image_base_name = '{}-{}'.format(
            getattr(config.CONF, 'vping_image_name'),
            str(self.guid))
        os_image_settings = openstack_tests.cirros_image_settings(
            image_base_name, image_metadata=self.cirros_image_config)
        self.logger.info("Creating image with name: '%s'", image_base_name)
        self.image_creator = deploy_utils.create_image(
            self.os_creds, os_image_settings)
        self.creators.append(self.image_creator)
        private_net_name = getattr(
            config.CONF, 'vping_private_net_name') + self.guid
        private_subnet_name = getattr(
            config.CONF, 'vping_private_subnet_name') + self.guid
        private_subnet_cidr = getattr(config.CONF, 'vping_private_subnet_cidr')
        # Optional provider-network settings; only applied when present
        # in the config.
        vping_network_type = None
        vping_physical_network = None
        vping_segmentation_id = None
        if hasattr(config.CONF, 'vping_network_type'):
            vping_network_type = getattr(config.CONF, 'vping_network_type')
        if hasattr(config.CONF, 'vping_physical_network'):
            vping_physical_network = getattr(
                config.CONF, 'vping_physical_network')
        if hasattr(config.CONF, 'vping_segmentation_id'):
            vping_segmentation_id = getattr(
                config.CONF, 'vping_segmentation_id')
        self.logger.info(
            "Creating network with name: '%s'", private_net_name)
        self.network_creator = deploy_utils.create_network(
            self.os_creds,
            NetworkConfig(
                name=private_net_name,
                network_type=vping_network_type,
                physical_network=vping_physical_network,
                segmentation_id=vping_segmentation_id,
                subnet_settings=[SubnetConfig(
                    name=private_subnet_name,
                    cidr=private_subnet_cidr,
                    dns_nameservers=[env.get('NAMESERVER')])]))
        self.creators.append(self.network_creator)
        # Creating router to external network
        log = "Creating router with name: '%s'" % self.router_name
        self.logger.info(log)
        ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
        self.router_creator = deploy_utils.create_router(
            self.os_creds,
            RouterConfig(
                name=self.router_name,
                external_gateway=ext_net_name,
                internal_subnets=[private_subnet_name]))
        self.creators.append(self.router_creator)
        self.logger.info(
            "Creating flavor with name: '%s'", self.flavor_name)
        flavor_ram = getattr(config.CONF, 'openstack_flavor_ram')
        flavor_metadata = getattr(config.CONF, 'flavor_extra_specs', None)
        flavor_creator = OpenStackFlavor(
            self.os_creds,
            FlavorConfig(name=self.flavor_name, ram=flavor_ram, disk=1,
                         vcpus=1, metadata=flavor_metadata))
        flavor_creator.create()
        self.creators.append(flavor_creator)
    def _execute(self):
        """
        Method called by subclasses after environment has been setup
        :return: the exit code (EX_OK on success, EX_RUN_ERROR otherwise)
        :raises Exception: if either VM never reaches the ACTIVE state
        """
        self.logger.info('Begin test execution')
        # Ping target: the first port of VM 1.
        test_ip = self.vm1_creator.get_port_ip(
            self.vm1_creator.instance_settings.port_settings[0].name)
        if self.vm1_creator.vm_active(
                block=True) and self.vm2_creator.vm_active(block=True):
            result = self._do_vping(self.vm2_creator, test_ip)
        else:
            raise Exception('VMs never became active')
        self.stop_time = time.time()
        # self.result is the testcase score: 100 = pass, 0 = fail.
        if result != testcase.TestCase.EX_OK:
            self.result = 0
            return testcase.TestCase.EX_RUN_ERROR
        self.result = 100
        return testcase.TestCase.EX_OK
    def clean(self):
        """
        Cleanup all OpenStack objects. Should be called on completion
        :return:
        """
        # Honor the vping_cleanup_objects config switch (string 'True').
        if getattr(config.CONF, 'vping_cleanup_objects') == 'True':
            # Tear down in reverse creation order so dependencies
            # (e.g. router before network) are removed safely.
            for creator in reversed(self.creators):
                try:
                    creator.clean()
                except Exception as error:  # pylint: disable=broad-except
                    # Best-effort cleanup: log and keep going.
                    self.logger.error('Unexpected error cleaning - %s', error)
    def _do_vping(self, vm_creator, test_ip):
        """
        Method to be implemented by subclasses
        Begins the real test after the OpenStack environment has been setup
        :param vm_creator: the SNAPS VM instance creator object
        :param test_ip: the IP to which the VM needs to issue the ping
        :return: T/F
        """
        raise NotImplementedError('vping execution is not implemented')
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import semantic_version
import types
from unittest import mock
import weakref
from oslo_utils.uuidutils import generate_uuid
from murano.dsl import dsl_types
from murano.dsl import exceptions
from murano.dsl import helpers
from murano.tests.unit import base
class TestDSLHelpers(base.MuranoTestCase):
@mock.patch.object(helpers, 'with_object_store', autospec=True)
def test_parallel_select_except_exception(self, mock_with_object_store):
mock_with_object_store.side_effect = ValueError
self.assertRaises(ValueError, helpers.parallel_select,
[mock.sentinel.foo], lambda: None)
def test_enum(self):
self.assertEqual('Enum', helpers.enum().__name__)
def test_cast_with_murano_type(self):
mock_attrs = {
'name': mock.sentinel.class_type,
'version': semantic_version.Version('1.0.0'),
'ancestors.return_value': []
}
mock_type = mock.Mock()
mock_type.configure_mock(**mock_attrs)
mock_obj = mock.Mock(type=mock_type)
mock_obj.cast.return_value = mock.sentinel.foo_cast_value
mock_murano_class = mock.Mock(spec=dsl_types.MuranoType)
mock_murano_class.name = mock.sentinel.class_type
mock_murano_class.version = semantic_version.Version('1.0.0')
result = helpers.cast(mock_obj, mock_murano_class,
pov_or_version_spec=None)
self.assertEqual(mock.sentinel.foo_cast_value, result)
mock_obj.cast.assert_called_once_with(mock_type)
def test_cast_except_value_error(self):
mock_attrs = {
'name': mock.sentinel.class_type,
'version': semantic_version.Version('1.0.0'),
'ancestors.return_value': []
}
mock_type = mock.Mock()
mock_type.configure_mock(**mock_attrs)
mock_obj = mock.Mock(type=mock_type)
mock_murano_class = mock.Mock(spec=dsl_types.MuranoType)
mock_murano_class.name = mock.sentinel.class_type
e = self.assertRaises(ValueError, helpers.cast, mock_obj,
mock_murano_class,
pov_or_version_spec=mock.Mock())
self.assertEqual('pov_or_version_spec of unsupported type {0}'
.format(type(mock.Mock())), str(e))
def test_cast_except_no_class_found(self):
mock_attrs = {
'name': mock.sentinel.name,
'package.name': mock.sentinel.package_name,
'version': mock.sentinel.version,
'ancestors.return_value': []
}
mock_type = mock.Mock()
mock_type.configure_mock(**mock_attrs)
mock_obj = mock.Mock(type=mock_type)
mock_murano_class = mock.Mock(spec=dsl_types.MuranoTypeReference)
mock_murano_class.type = mock.sentinel.foo_class
mock_version_spec = mock.Mock(spec=dsl_types.MuranoPackage)
e = self.assertRaises(exceptions.NoClassFound, helpers.cast, mock_obj,
mock_murano_class,
pov_or_version_spec=mock_version_spec)
self.assertIn('Class "sentinel.foo_class" is not found', str(e))
def test_cast_except_ambiguous_class_name(self):
mock_attrs = {
'name': mock.sentinel.class_type,
'version': semantic_version.Version('1.0.0')
}
mock_ancestor = mock.Mock()
mock_ancestor.configure_mock(**mock_attrs)
mock_attrs['ancestors.return_value'] = [mock_ancestor]
mock_type = mock.Mock()
mock_type.configure_mock(**mock_attrs)
mock_obj = mock.Mock(type=mock_type)
mock_murano_class = mock.Mock(spec=dsl_types.MuranoTypeReference)
mock_murano_class.type = mock.sentinel.class_type
# pov_or_version_spec of '1' will be converted to
# semantic_version.Spec('>=1.0.0,<2.0.0-0')
self.assertRaises(exceptions.AmbiguousClassName, helpers.cast,
mock_obj, mock_murano_class, pov_or_version_spec='1')
def test_inspect_is_method(self):
mock_cls = mock.Mock(foo=lambda: None, bar=None)
self.assertTrue(helpers.inspect_is_method(mock_cls, 'foo'))
self.assertFalse(helpers.inspect_is_method(mock_cls, 'bar'))
def test_inspect_is_property(self):
data_descriptor = mock.MagicMock(__get__=None, __set__=None)
mock_cls = mock.Mock(foo=data_descriptor, bar=None)
self.assertTrue(helpers.inspect_is_property(mock_cls, 'foo'))
self.assertFalse(helpers.inspect_is_property(mock_cls, 'bar'))
def test_updated_dict(self):
dict_ = {'foo': 'bar'}
self.assertEqual(dict_, helpers.updated_dict(dict_, {}))
def test_updated_dict_with_null_arg(self):
dict_ = {'foo': 'bar'}
self.assertEqual(dict_, helpers.updated_dict(None, dict_))
def test_resolve_with_return_reference_true(self):
mock_value = mock.Mock(spec=dsl_types.MuranoTypeReference)
mock_scope_type = mock.Mock(spec=dsl_types.MuranoTypeReference)
result = helpers.resolve_type(mock_value, mock_scope_type, True)
self.assertEqual(mock_value, result)
mock_value = mock.Mock()
mock_value.get_reference.return_value = mock.sentinel.foo_reference
mock_scope_type = mock.Mock()
mock_scope_type.package.find_class.return_value = mock_value
result = helpers.resolve_type(mock_value, mock_scope_type, True)
self.assertEqual(mock.sentinel.foo_reference, result)
def test_resolve_type_with_null_value(self):
self.assertIsNone(helpers.resolve_type(None, None))
def test_assemble_object_definition(self):
test_parsed = {
'type': mock.sentinel.type,
'properties': {},
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'metadata': mock.sentinel.metadata,
'destroyed': True,
'extra': {}
}
expected = {
'?': {
'type': mock.sentinel.type,
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'metadata': mock.sentinel.metadata,
'destroyed': True
}
}
result = helpers.assemble_object_definition(test_parsed)
for key, val in expected.items():
self.assertEqual(val, result[key])
@mock.patch.object(helpers, 'format_type_string', autospec=True)
def test_assemble_object_definition_with_serializable_model_format(
self, mock_format_type_string):
mock_format_type_string.return_value = mock.sentinel.type
test_parsed = {
'type': mock.sentinel.type,
'properties': {},
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'metadata': mock.sentinel.metadata,
'destroyed': True,
'extra': {}
}
expected = {
'?': {
'type': mock.sentinel.type,
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'metadata': mock.sentinel.metadata,
'destroyed': True
}
}
model_format = dsl_types.DumpTypes.Serializable
result = helpers.assemble_object_definition(test_parsed, model_format)
for key, val in expected['?'].items():
self.assertEqual(val, result['?'][key])
def test_assemble_object_definition_with_inline_model_format(self):
test_parsed = {
'type': mock.sentinel.type,
'properties': mock.sentinel.properties,
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'metadata': mock.sentinel.metadata,
'dependencies': mock.sentinel.dependencies,
'destroyed': mock.sentinel.destroyed,
'extra': {}
}
model_format = dsl_types.DumpTypes.Inline
expected = copy.copy(test_parsed)
expected[mock.sentinel.type] = mock.sentinel.properties
for key in ['type', 'extra', 'properties']:
expected.pop(key)
result = helpers.assemble_object_definition(test_parsed, model_format)
for key, val in expected.items():
self.assertEqual(val, result[key])
def test_assemble_object_definition_except_value_error(self):
test_parsed = {
'type': mock.sentinel.type,
'properties': {},
'id': mock.sentinel.id,
'name': mock.sentinel.name,
'metadata': mock.sentinel.metadata,
'destroyed': True,
'extra': {}
}
e = self.assertRaises(ValueError, helpers.assemble_object_definition,
test_parsed, None)
self.assertEqual('Invalid Serialization Type', str(e))
def test_function(self):
def f():
return
self.assertTrue(isinstance(helpers.function(f), types.FunctionType))
def test_function_from_method(self):
class C:
def m(self):
return
c = C()
self.assertTrue(isinstance(helpers.function(c.m), types.FunctionType))
def test_weak_proxy(self):
self.assertIsNone(helpers.weak_proxy(None))
def test_weak_proxy_with_reference_type(self):
result = helpers.weak_proxy(weakref.ReferenceType(int))
self.assertEqual('int', result.__name__)
    @mock.patch.object(helpers, 'get_object_store', autospec=True)
    def test_weak_ref(self, mock_get_object_store):
        """Dereferencing a weak ref falls back to the object store."""
        # The object store is mocked so that resolving the ref goes
        # through store.get() instead of a live object graph.
        mock_object_store = mock.Mock(
            **{'get.return_value': mock.sentinel.res})
        mock_get_object_store.return_value = mock_object_store
        test_obj = dsl_types.MuranoObject()
        # weak_ref apparently keys objects by their object_id attribute —
        # TODO confirm against helpers.weak_ref.
        setattr(test_obj, 'object_id', generate_uuid())
        murano_object_weak_ref = helpers.weak_ref(test_obj)
        # Replace the real weakref with one that resolves to None so
        # __call__ has to consult the object store.
        setattr(murano_object_weak_ref, 'ref', lambda *args: None)
        result = murano_object_weak_ref.__call__()
        self.assertEqual(mock.sentinel.res, result)
        # NOTE(review): the final assertion implies __call__ re-creates
        # 'ref' as a genuine weakref after a successful store lookup.
        self.assertEqual('weakref',
                         murano_object_weak_ref.ref.__class__.__name__)
def test_weak_ref_with_null_obj(self):
self.assertIsNone(helpers.weak_ref(None))
    @mock.patch.object(helpers, 're', autospec=True)
    def test_parse_type_string_with_null_res(self, mock_re):
        """An unparseable type string yields None."""
        # Force the compiled pattern to report no match so the parser
        # takes its "string did not match" path.
        mock_re.compile.return_value = mock.Mock(
            **{'match.return_value': None})
        self.assertIsNone(helpers.parse_type_string('', None, None))
    def test_format_type_string(self):
        """A type reference is rendered as "<name>/<version>@<package>"."""
        inner_type_obj = mock.Mock(spec=dsl_types.MuranoType)
        # 'name' must be set via configure_mock: Mock(name=...) would
        # rename the mock object itself instead of creating an attribute.
        inner_type_obj.configure_mock(**{'name': 'foo', 'version': 'foo_ver'})
        inner_type_obj_pkg = mock.Mock()
        inner_type_obj_pkg.configure_mock(name='foo_pkg')
        setattr(inner_type_obj, 'package', inner_type_obj_pkg)
        type_obj = mock.Mock(spec=dsl_types.MuranoTypeReference,
                             type=inner_type_obj)
        result = helpers.format_type_string(type_obj)
        self.assertEqual('foo/foo_ver@foo_pkg', result)
def test_format_type_string_except_value_error(self):
type_obj = mock.Mock(spec=dsl_types.MuranoTypeReference, type=None)
e = self.assertRaises(ValueError, helpers.format_type_string, type_obj)
self.assertEqual('Invalid argument', str(e))
    def test_patch_dict(self):
        """patch_dict walks each dotted path segment via dict.get."""
        path = 'foo.bar.baz'
        fake_dict = mock.MagicMock(spec=dict)
        # Make the dict return itself to test whether all the parts are called.
        fake_dict.get.return_value = fake_dict
        helpers.patch_dict(fake_dict, path, None)
        # Only intermediate segments go through get(); the last segment
        # ('baz') is assigned, not looked up.
        fake_dict.get.assert_has_calls([mock.call('foo'), mock.call('bar')])
        fake_dict.pop.assert_not_called()
def test_patch_dict_without_dict(self):
path = 'foo.bar.baz'
not_a_dict = mock.Mock()
helpers.patch_dict(not_a_dict, path, None)
not_a_dict.get.assert_not_called()
not_a_dict.pop.assert_not_called()
@mock.patch.object(helpers, 'gc')
def test_walk_gc_with_towards_true(self, mock_gc, autospec=True):
mock_gc.get_referrers.side_effect = [
[mock.sentinel.second], [mock.sentinel.third]
]
first_obj = mock.sentinel.first
handler = mock.MagicMock()
handler.return_value = True
expected = [
[mock.sentinel.first],
[mock.sentinel.first, mock.sentinel.second],
[mock.sentinel.first, mock.sentinel.second, mock.sentinel.third]
]
actual = []
for obj in helpers.walk_gc(first_obj, True, handler):
actual.append(obj)
self.assertEqual(expected, actual)
    @mock.patch.object(helpers, 'gc', autospec=True)
    def test_walk_gc_with_towards_false(self, mock_gc):
        """walk_gc with towards=False follows gc referents inward."""
        mock_gc.get_referents.side_effect = [
            # Trigger the continue by duplicating entries.
            [mock.sentinel.second], [mock.sentinel.second]
        ]
        first_obj = mock.sentinel.first
        handler = mock.MagicMock()
        handler.return_value = True
        # Note the chain order is reversed compared to towards=True:
        # newly discovered objects are prepended.
        expected = [
            [mock.sentinel.first],
            [mock.sentinel.second, mock.sentinel.first]
        ]
        actual = []
        for obj in helpers.walk_gc(first_obj, False, handler):
            actual.append(obj)
        self.assertEqual(expected, actual)
class TestMergeDicts(base.MuranoTestCase):
    """Behaviour of helpers.merge_dicts for flat and nested inputs."""

    def check(self, dict1, dict2, expected):
        """Merge dict2 into dict1 and compare against expected."""
        self.assertEqual(expected, helpers.merge_dicts(dict1, dict2))

    def test_dicts_plain(self):
        # Values from the right-hand dict win for duplicate keys.
        self.check({"a": "1"},
                   {"a": "100", "ab": "12"},
                   {"a": "100", "ab": "12"})

    def test_different_types_none(self):
        # A None on the right side must not overwrite an existing value.
        self.check({"a": "1"},
                   {"a": None, "ab": "12"},
                   {"a": "1", "ab": "12"})

    def test_different_types_of_iterable(self):
        # Merging a dict value with a list value is a type conflict.
        self.assertRaises(TypeError, helpers.merge_dicts,
                          {"a": {"ab": "1"}}, {"a": ["ab", "1"]})

    def test_merge_nested_dicts(self):
        self.check({"a": {"ab": {}, "abc": "1"}},
                   {"a": {"abc": "123"}},
                   {"a": {"ab": {}, "abc": "123"}})

    def test_merge_nested_dicts_with_max_levels(self):
        # Beyond max_levels the right-hand subtree replaces the left one
        # wholesale instead of being merged key by key.
        result = helpers.merge_dicts(
            {"a": {"ab": {"abcd": "1234"}, "abc": "1"}},
            {"a": {"ab": {"y": "9"}, "abc": "123"}},
            max_levels=2)
        self.assertEqual({"a": {"ab": {"y": "9"}, "abc": "123"}}, result)

    def test_merge_with_lists(self):
        # Lists are merged as ordered unions without duplicates.
        self.check({"a": [1, 2]},
                   {"a": [1, 3, 2, 4]},
                   {"a": [1, 2, 3, 4]})
class TestParseVersionSpec(base.MuranoTestCase):
    """Behaviour of helpers.parse_version_spec normalization."""

    def check(self, expected, version_spec):
        """Assert that version_spec parses into the expected Spec."""
        self.assertEqual(expected, helpers.parse_version_spec(version_spec))

    def test_empty_version_spec(self):
        # An empty spec means "any 0.x version".
        self.check(semantic_version.Spec('>=0.0.0', '<1.0.0'), "")

    def test_empty_kind(self):
        # A bare version string is treated as an exact match.
        self.check(semantic_version.Spec('==1.11.111'), "1.11.111")

    def test_implicit_major(self):
        # Missing minor/patch parts are filled with zeros.
        self.check(semantic_version.Spec('>=2.0.0'), ">=2")

    def test_implicit_minor(self):
        self.check(semantic_version.Spec('>=2.1.0'), ">=2.1")

    def test_remove_spaces(self):
        # Embedded whitespace is stripped before parsing.
        self.check(semantic_version.Spec('<2.2.0'), "< = 2 .1")

    def test_input_version(self):
        # A Version instance is converted to an exact-match Spec.
        self.check(semantic_version.Spec('==1.11.111'),
                   semantic_version.Version('1.11.111'))

    def test_input_spec(self):
        # A Spec instance is normalized clause by clause.
        self.check(semantic_version.Spec('<1.12.0', '<2.0.0'),
                   semantic_version.Spec('<=1', '<=1.11'))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.Dense'])
class Dense(keras_layers.Dense, base.Layer):
  """Densely-connected layer class.

  This layer implements the operation:
  `outputs = activation(inputs * kernel + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    _reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the kernel matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the kernel matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel_constraint: Constraint function for the kernel matrix.
    bias_constraint: Constraint function for the bias.
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               # NOTE(review): this default is constructed once at import
               # time; assumed safe to share because zeros_initializer is
               # presumably stateless — confirm against init_ops.
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # Pure pass-through: all real configuration is handled by the Keras
    # Dense implementation; this subclass only mixes in the v1
    # base.Layer behavior.
    super(Dense, self).__init__(units=units,
                                activation=activation,
                                use_bias=use_bias,
                                kernel_initializer=kernel_initializer,
                                bias_initializer=bias_initializer,
                                kernel_regularizer=kernel_regularizer,
                                bias_regularizer=bias_regularizer,
                                activity_regularizer=activity_regularizer,
                                kernel_constraint=kernel_constraint,
                                bias_constraint=bias_constraint,
                                trainable=trainable,
                                name=name,
                                **kwargs)
@deprecation.deprecated(
    date=None,
    instructions='Use keras.layers.dense instead.')
@tf_export(v1=['layers.dense'])
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=init_ops.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  Computes `outputs = activation(inputs * kernel + bias)`, where
  `activation` is the optional activation callable, `kernel` is a weights
  matrix created by the layer and `bias` is a bias vector created by the
  layer (only when `use_bias` is `True`).

  Arguments:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable); `None` keeps a linear
      activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix. If
      `None` (default), weights are initialized using the default
      initializer used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to
      the kernel after being updated by an `Optimizer` (e.g. to implement
      norm or value constraints). It must map the unprojected variable to
      a projected variable of the same shape. Constraints are not safe to
      use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by
      the same name.

  Returns:
    Output tensor the same shape as `inputs` except the last dimension is
    of size `units`.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Collect the layer configuration once, then build and apply it.
  layer_config = dict(
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=kernel_initializer,
      bias_initializer=bias_initializer,
      kernel_regularizer=kernel_regularizer,
      bias_regularizer=bias_regularizer,
      activity_regularizer=activity_regularizer,
      kernel_constraint=kernel_constraint,
      bias_constraint=bias_constraint,
      trainable=trainable,
      name=name,
      _scope=name,
      _reuse=reuse)
  return Dense(units, **layer_config).apply(inputs)
@tf_export(v1=['layers.Dropout'])
class Dropout(keras_layers.Dropout, base.Layer):
  """Applies Dropout to the input.

  Dropout consists in randomly setting a fraction `rate` of input units to 0
  at each update during training time, which helps prevent overfitting.
  The units that are kept are scaled by `1 / (1 - rate)`, so that their
  sum is unchanged at training time and inference time.

  Arguments:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    name: The name of the layer (string).
  """

  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    # Pure pass-through to the Keras Dropout implementation.
    super(Dropout, self).__init__(rate=rate,
                                  noise_shape=noise_shape,
                                  seed=seed,
                                  name=name,
                                  **kwargs)

  def call(self, inputs, training=False):
    # Override only to pin the default to a concrete False, so callers
    # that pass no training flag get inference behavior.
    return super(Dropout, self).call(inputs, training=training)
@deprecation.deprecated(
    date=None,
    instructions='Use keras.layers.dropout instead.')
@tf_export(v1=['layers.dropout'])
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
  """Applies Dropout to the input.

  Dropout randomly sets a fraction `rate` of input units to 0 at each
  update during training time, which helps prevent overfitting. The kept
  units are scaled by `1 / (1 - rate)`, so that their sum is unchanged at
  training time and inference time.

  Arguments:
    inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop
      out 10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input. For
      instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    training: Either a Python boolean, or a TensorFlow boolean scalar
      tensor (e.g. a placeholder). Whether to return the output in
      training mode (apply dropout) or in inference mode (return the
      input untouched).
    name: The name of the layer (string).

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  dropout_layer = Dropout(rate, noise_shape=noise_shape, seed=seed,
                          name=name)
  return dropout_layer.apply(inputs, training=training)
@tf_export(v1=['layers.Flatten'])
class Flatten(keras_layers.Flatten, base.Layer):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  All behavior comes from the Keras `Flatten` implementation combined
  with the v1 `base.Layer` machinery; the class body adds nothing.

  Arguments:
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch, channels, ...)`.

  Examples:

  ```
  x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
  y = Flatten()(x)
  # now `y` has shape `(None, 16)`

  x = tf.placeholder(shape=(None, 3, None), dtype='float32')
  y = Flatten()(x)
  # now `y` has shape `(None, None)`
  ```
  """
@deprecation.deprecated(
    date=None,
    instructions='Use keras.layers.flatten instead.')
@tf_export(v1=['layers.flatten'])
def flatten(inputs, name=None, data_format='channels_last'):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Arguments:
    inputs: Tensor input.
    name: The name of the layer (string).
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)` while `channels_first`
      corresponds to inputs with shape `(batch, channels, height, width)`.

  Returns:
    Reshaped tensor.

  Examples:

  ```
  x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
  y = flatten(x)
  # now `y` has shape `(None, 16)`

  x = tf.placeholder(shape=(None, 3, None), dtype='float32')
  y = flatten(x)
  # now `y` has shape `(None, None)`
  ```
  """
  return Flatten(name=name, data_format=data_format).apply(inputs)
# Aliases kept for backwards compatibility with older tf.layers callers.
FullyConnected = Dense
fully_connected = dense
| |
"""
Support for Xiaomi Philips Lights.
LED Ball, Candle, Downlight, Ceiling, Eyecare 2, Bedside & Desklamp Lamp.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/light.xiaomi_miio/
"""
import asyncio
import datetime
from datetime import timedelta
from functools import partial
import logging
from math import ceil
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_ENTITY_ID, DOMAIN, PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, Light)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.util import color, dt
REQUIREMENTS = ['python-miio==0.4.4', 'construct==2.9.45']

_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = 'Xiaomi Philips Light'
DATA_KEY = 'light.xiaomi_miio'

CONF_MODEL = 'model'

# When CONF_MODEL is omitted, async_setup_platform auto-detects the model
# by querying the device itself.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_MODEL): vol.In(
        ['philips.light.sread1',
         'philips.light.ceiling',
         'philips.light.zyceiling',
         'philips.light.moonlight',
         'philips.light.bulb',
         'philips.light.candle',
         'philips.light.candle2',
         'philips.light.mono1',
         'philips.light.downlight',
         ]),
})

# The light does not accept cct values < 1
CCT_MIN = 1
CCT_MAX = 100

# Tolerances used when deciding whether a delayed-turn-off timestamp has
# really changed or is just polling jitter.
DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS = 4
DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES = 1

# Response payload the device returns for a successful command.
SUCCESS = ['ok']

# Entity state attribute names.
ATTR_MODEL = 'model'
ATTR_SCENE = 'scene'
ATTR_DELAYED_TURN_OFF = 'delayed_turn_off'
ATTR_TIME_PERIOD = 'time_period'
ATTR_NIGHT_LIGHT_MODE = 'night_light_mode'
ATTR_AUTOMATIC_COLOR_TEMPERATURE = 'automatic_color_temperature'
ATTR_REMINDER = 'reminder'
ATTR_EYECARE_MODE = 'eyecare_mode'

# Moonlight
ATTR_SLEEP_ASSISTANT = 'sleep_assistant'
ATTR_SLEEP_OFF_TIME = 'sleep_off_time'
ATTR_TOTAL_ASSISTANT_SLEEP_TIME = 'total_assistant_sleep_time'
ATTR_BRAND_SLEEP = 'brand_sleep'
ATTR_BRAND = 'brand'

# Custom services registered by this platform.
SERVICE_SET_SCENE = 'xiaomi_miio_set_scene'
SERVICE_SET_DELAYED_TURN_OFF = 'xiaomi_miio_set_delayed_turn_off'
SERVICE_REMINDER_ON = 'xiaomi_miio_reminder_on'
SERVICE_REMINDER_OFF = 'xiaomi_miio_reminder_off'
SERVICE_NIGHT_LIGHT_MODE_ON = 'xiaomi_miio_night_light_mode_on'
SERVICE_NIGHT_LIGHT_MODE_OFF = 'xiaomi_miio_night_light_mode_off'
SERVICE_EYECARE_MODE_ON = 'xiaomi_miio_eyecare_mode_on'
SERVICE_EYECARE_MODE_OFF = 'xiaomi_miio_eyecare_mode_off'

XIAOMI_MIIO_SERVICE_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})

SERVICE_SCHEMA_SET_SCENE = XIAOMI_MIIO_SERVICE_SCHEMA.extend({
    vol.Required(ATTR_SCENE):
        vol.All(vol.Coerce(int), vol.Clamp(min=1, max=4))
})

SERVICE_SCHEMA_SET_DELAYED_TURN_OFF = XIAOMI_MIIO_SERVICE_SCHEMA.extend({
    vol.Required(ATTR_TIME_PERIOD):
        vol.All(cv.time_period, cv.positive_timedelta)
})

# Maps each service name to the entity coroutine that implements it and
# the schema used to validate its payload (default schema if omitted).
SERVICE_TO_METHOD = {
    SERVICE_SET_DELAYED_TURN_OFF: {
        'method': 'async_set_delayed_turn_off',
        'schema': SERVICE_SCHEMA_SET_DELAYED_TURN_OFF},
    SERVICE_SET_SCENE: {
        'method': 'async_set_scene',
        'schema': SERVICE_SCHEMA_SET_SCENE},
    SERVICE_REMINDER_ON: {'method': 'async_reminder_on'},
    SERVICE_REMINDER_OFF: {'method': 'async_reminder_off'},
    SERVICE_NIGHT_LIGHT_MODE_ON: {'method': 'async_night_light_mode_on'},
    SERVICE_NIGHT_LIGHT_MODE_OFF: {'method': 'async_night_light_mode_off'},
    SERVICE_EYECARE_MODE_ON: {'method': 'async_eyecare_mode_on'},
    SERVICE_EYECARE_MODE_OFF: {'method': 'async_eyecare_mode_off'},
}
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the light from config."""
    from miio import Device, DeviceException
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    token = config.get(CONF_TOKEN)
    model = config.get(CONF_MODEL)
    # Only a short token prefix is logged so the secret doesn't leak.
    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
    devices = []
    unique_id = None
    if model is None:
        # No model configured: query the device itself to detect it.
        try:
            miio_device = Device(host, token)
            device_info = miio_device.info()
            model = device_info.model
            unique_id = "{}-{}".format(model, device_info.mac_address)
            _LOGGER.info("%s %s %s detected",
                         model,
                         device_info.firmware_version,
                         device_info.hardware_version)
        except DeviceException:
            # Device unreachable; let Home Assistant retry the setup later.
            raise PlatformNotReady
    if model == 'philips.light.sread1':
        from miio import PhilipsEyecare
        light = PhilipsEyecare(host, token)
        primary_device = XiaomiPhilipsEyecareLamp(
            name, light, model, unique_id)
        devices.append(primary_device)
        hass.data[DATA_KEY][host] = primary_device
        secondary_device = XiaomiPhilipsEyecareLampAmbientLight(
            name, light, model, unique_id)
        devices.append(secondary_device)
        # The ambient light doesn't expose additional services.
        # A hass.data[DATA_KEY] entry isn't needed.
    elif model in ['philips.light.ceiling', 'philips.light.zyceiling']:
        from miio import Ceil
        light = Ceil(host, token)
        device = XiaomiPhilipsCeilingLamp(name, light, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    elif model == 'philips.light.moonlight':
        from miio import PhilipsMoonlight
        light = PhilipsMoonlight(host, token)
        device = XiaomiPhilipsMoonlightLamp(name, light, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    elif model in ['philips.light.bulb',
                   'philips.light.candle',
                   'philips.light.candle2',
                   'philips.light.downlight']:
        from miio import PhilipsBulb
        light = PhilipsBulb(host, token)
        device = XiaomiPhilipsBulb(name, light, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    elif model == 'philips.light.mono1':
        # mono1 uses the generic (brightness-only) entity: no color
        # temperature support.
        from miio import PhilipsBulb
        light = PhilipsBulb(host, token)
        device = XiaomiPhilipsGenericLight(name, light, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    else:
        _LOGGER.error(
            'Unsupported device found! Please create an issue at '
            'https://github.com/syssi/philipslight/issues '
            'and provide the following data: %s', model)
        return False
    async_add_entities(devices, update_before_add=True)

    async def async_service_handler(service):
        """Map services to methods on Xiaomi Philips Lights."""
        method = SERVICE_TO_METHOD.get(service.service)
        # Forward all service data except the entity-id selector itself.
        params = {key: value for key, value in service.data.items()
                  if key != ATTR_ENTITY_ID}
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            target_devices = [dev for dev in hass.data[DATA_KEY].values()
                              if dev.entity_id in entity_ids]
        else:
            # No explicit entity id: the service targets every light.
            target_devices = hass.data[DATA_KEY].values()
        update_tasks = []
        for target_device in target_devices:
            # Skip entities that don't implement the requested service.
            if not hasattr(target_device, method['method']):
                continue
            await getattr(target_device, method['method'])(**params)
            update_tasks.append(target_device.async_update_ha_state(True))
        if update_tasks:
            # NOTE(review): the 'loop' kwarg of asyncio.wait is deprecated
            # in Python 3.8+ — acceptable for the HA version this targets.
            await asyncio.wait(update_tasks, loop=hass.loop)

    for xiaomi_miio_service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[xiaomi_miio_service].get(
            'schema', XIAOMI_MIIO_SERVICE_SCHEMA)
        hass.services.async_register(
            DOMAIN, xiaomi_miio_service, async_service_handler, schema=schema)
class XiaomiPhilipsAbstractLight(Light):
    """Representation of a Abstract Xiaomi Philips Light."""

    def __init__(self, name, light, model, unique_id):
        """Initialize the light device."""
        self._name = name
        self._light = light
        self._model = model
        self._unique_id = unique_id
        # Brightness in Home Assistant scale (0..255); device uses percent.
        self._brightness = None
        self._available = False
        self._state = None
        self._state_attrs = {
            ATTR_MODEL: self._model,
        }

    @property
    def should_poll(self):
        """Poll the light."""
        return True

    @property
    def unique_id(self):
        """Return an unique ID."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def available(self):
        """Return true when state is known."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._state_attrs

    @property
    def is_on(self):
        """Return true if light is on."""
        return self._state

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness

    @property
    def supported_features(self):
        """Return the supported features."""
        return SUPPORT_BRIGHTNESS

    async def _try_command(self, mask_error, func, *args, **kwargs):
        """Call a light command handling error messages."""
        from miio import DeviceException
        try:
            # miio calls are blocking; run them in the executor pool.
            result = await self.hass.async_add_executor_job(
                partial(func, *args, **kwargs))
            _LOGGER.debug("Response received from light: %s", result)
            # The device answers ['ok'] (SUCCESS) when a command worked.
            return result == SUCCESS
        except DeviceException as exc:
            _LOGGER.error(mask_error, exc)
            # Mark unavailable until the next successful update.
            self._available = False
            return False

    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs[ATTR_BRIGHTNESS]
            # Convert HA brightness (0..255) to the device's percent scale.
            percent_brightness = ceil(100 * brightness / 255.0)
            _LOGGER.debug(
                "Setting brightness: %s %s%%",
                brightness, percent_brightness)
            result = await self._try_command(
                "Setting brightness failed: %s",
                self._light.set_brightness, percent_brightness)
            # Cache the new value only when the device confirmed it.
            if result:
                self._brightness = brightness
        else:
            await self._try_command(
                "Turning the light on failed.", self._light.on)

    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        await self._try_command(
            "Turning the light off failed.", self._light.off)

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        try:
            state = await self.hass.async_add_executor_job(self._light.status)
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)
            return
        _LOGGER.debug("Got new state: %s", state)
        self._available = True
        self._state = state.is_on
        # Convert the device's percent brightness back to the 0..255 scale.
        self._brightness = ceil((255 / 100.0) * state.brightness)
class XiaomiPhilipsGenericLight(XiaomiPhilipsAbstractLight):
    """Representation of a Generic Xiaomi Philips Light."""

    def __init__(self, name, light, model, unique_id):
        """Initialize the light device."""
        super().__init__(name, light, model, unique_id)
        # Expose scene and delayed-turn-off on top of the base attributes.
        self._state_attrs.update({
            ATTR_SCENE: None,
            ATTR_DELAYED_TURN_OFF: None,
        })

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        try:
            state = await self.hass.async_add_executor_job(self._light.status)
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)
            return
        _LOGGER.debug("Got new state: %s", state)
        self._available = True
        self._state = state.is_on
        # Convert device percent brightness to the 0..255 HA scale.
        self._brightness = ceil((255 / 100.0) * state.brightness)
        delayed_turn_off = self.delayed_turn_off_timestamp(
            state.delay_off_countdown,
            dt.utcnow(),
            self._state_attrs[ATTR_DELAYED_TURN_OFF])
        self._state_attrs.update({
            ATTR_SCENE: state.scene,
            ATTR_DELAYED_TURN_OFF: delayed_turn_off,
        })

    async def async_set_scene(self, scene: int = 1):
        """Set the fixed scene."""
        await self._try_command(
            "Setting a fixed scene failed.",
            self._light.set_scene, scene)

    async def async_set_delayed_turn_off(self, time_period: timedelta):
        """Set delayed turn off."""
        await self._try_command(
            "Setting the turn off delay failed.",
            self._light.delay_off, time_period.total_seconds())

    @staticmethod
    def delayed_turn_off_timestamp(countdown: int,
                                   current: datetime,
                                   previous: datetime):
        """Update the turn off timestamp only if necessary."""
        if countdown is not None and countdown > 0:
            new = current.replace(microsecond=0) + \
                timedelta(seconds=countdown)
            if previous is None:
                return new
            # Keep the previous timestamp when the new one differs by only
            # a few seconds: avoids noisy state churn from polling jitter.
            lower = timedelta(seconds=-DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
            upper = timedelta(seconds=DELAYED_TURN_OFF_MAX_DEVIATION_SECONDS)
            diff = previous - new
            if lower < diff < upper:
                return previous
            return new
        # No active countdown on the device.
        return None
class XiaomiPhilipsBulb(XiaomiPhilipsGenericLight):
"""Representation of a Xiaomi Philips Bulb."""
def __init__(self, name, light, model, unique_id):
"""Initialize the light device."""
super().__init__(name, light, model, unique_id)
self._color_temp = None
@property
def color_temp(self):
"""Return the color temperature."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 175
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 333
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_COLOR_TEMP in kwargs:
color_temp = kwargs[ATTR_COLOR_TEMP]
percent_color_temp = self.translate(
color_temp, self.max_mireds,
self.min_mireds, CCT_MIN, CCT_MAX)
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
if ATTR_BRIGHTNESS in kwargs and ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting brightness and color temperature: "
"%s %s%%, %s mireds, %s%% cct",
brightness, percent_brightness,
color_temp, percent_color_temp)
result = await self._try_command(
"Setting brightness and color temperature failed: "
"%s bri, %s cct",
self._light.set_brightness_and_color_temperature,
percent_brightness, percent_color_temp)
if result:
self._color_temp = color_temp
self._brightness = brightness
elif ATTR_COLOR_TEMP in kwargs:
_LOGGER.debug(
"Setting color temperature: "
"%s mireds, %s%% cct",
color_temp, percent_color_temp)
result = await self._try_command(
"Setting color temperature failed: %s cct",
self._light.set_color_temperature, percent_color_temp)
if result:
self._color_temp = color_temp
elif ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
percent_brightness = ceil(100 * brightness / 255.0)
_LOGGER.debug(
"Setting brightness: %s %s%%",
brightness, percent_brightness)
result = await self._try_command(
"Setting brightness failed: %s",
self._light.set_brightness, percent_brightness)
if result:
self._brightness = brightness
else:
await self._try_command(
"Turning the light on failed.", self._light.on)
async def async_update(self):
    """Poll the device and refresh the cached state."""
    from miio import DeviceException
    try:
        status = await self.hass.async_add_executor_job(self._light.status)
    except DeviceException as err:
        self._available = False
        _LOGGER.error("Got exception while fetching the state: %s", err)
        return

    _LOGGER.debug("Got new state: %s", status)
    self._available = True
    self._state = status.is_on
    self._brightness = ceil((255 / 100.0) * status.brightness)
    # Device reports CCT percent; convert back to mireds (inverted span).
    self._color_temp = self.translate(
        status.color_temperature, CCT_MIN, CCT_MAX,
        self.max_mireds, self.min_mireds)
    delayed_off = self.delayed_turn_off_timestamp(
        status.delay_off_countdown, dt.utcnow(),
        self._state_attrs[ATTR_DELAYED_TURN_OFF])
    self._state_attrs.update({
        ATTR_SCENE: status.scene,
        ATTR_DELAYED_TURN_OFF: delayed_off,
    })
@staticmethod
def translate(value, left_min, left_max, right_min, right_max):
    """Linearly map *value* from the left span onto the right span."""
    position = (value - left_min) / float(left_max - left_min)
    return int(right_min + position * (right_max - right_min))
class XiaomiPhilipsCeilingLamp(XiaomiPhilipsBulb):
    """Representation of a Xiaomi Philips Ceiling Lamp."""

    def __init__(self, name, light, model, unique_id):
        """Initialize the light device."""
        super().__init__(name, light, model, unique_id)
        # Extra attributes this model exposes; filled by async_update().
        extra_attrs = {
            ATTR_NIGHT_LIGHT_MODE: None,
            ATTR_AUTOMATIC_COLOR_TEMPERATURE: None,
        }
        self._state_attrs.update(extra_attrs)

    @property
    def min_mireds(self):
        """Coldest color temperature this lamp supports, in mireds."""
        return 175

    @property
    def max_mireds(self):
        """Warmest color temperature this lamp supports, in mireds."""
        return 370

    async def async_update(self):
        """Poll the device and refresh the cached state."""
        from miio import DeviceException
        try:
            status = await self.hass.async_add_executor_job(self._light.status)
        except DeviceException as err:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", err)
            return

        _LOGGER.debug("Got new state: %s", status)
        self._available = True
        self._state = status.is_on
        self._brightness = ceil((255 / 100.0) * status.brightness)
        # CCT percent -> mireds (inverted span).
        self._color_temp = self.translate(
            status.color_temperature, CCT_MIN, CCT_MAX,
            self.max_mireds, self.min_mireds)
        delayed_off = self.delayed_turn_off_timestamp(
            status.delay_off_countdown, dt.utcnow(),
            self._state_attrs[ATTR_DELAYED_TURN_OFF])
        self._state_attrs.update({
            ATTR_SCENE: status.scene,
            ATTR_DELAYED_TURN_OFF: delayed_off,
            ATTR_NIGHT_LIGHT_MODE: status.smart_night_light,
            ATTR_AUTOMATIC_COLOR_TEMPERATURE:
                status.automatic_color_temperature,
        })
class XiaomiPhilipsEyecareLamp(XiaomiPhilipsGenericLight):
    """Representation of a Xiaomi Philips Eyecare Lamp 2."""

    def __init__(self, name, light, model, unique_id):
        """Initialize the light device."""
        super().__init__(name, light, model, unique_id)
        # Extra attributes this model exposes; filled by async_update().
        self._state_attrs.update({
            ATTR_REMINDER: None,
            ATTR_NIGHT_LIGHT_MODE: None,
            ATTR_EYECARE_MODE: None,
        })

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        try:
            state = await self.hass.async_add_executor_job(self._light.status)
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)
            return

        _LOGGER.debug("Got new state: %s", state)
        self._available = True
        self._state = state.is_on
        # Device brightness is a percentage; Home Assistant uses 0-255.
        self._brightness = ceil((255 / 100.0) * state.brightness)

        delayed_turn_off = self.delayed_turn_off_timestamp(
            state.delay_off_countdown,
            dt.utcnow(),
            self._state_attrs[ATTR_DELAYED_TURN_OFF])

        self._state_attrs.update({
            ATTR_SCENE: state.scene,
            ATTR_DELAYED_TURN_OFF: delayed_turn_off,
            ATTR_REMINDER: state.reminder,
            ATTR_NIGHT_LIGHT_MODE: state.smart_night_light,
            ATTR_EYECARE_MODE: state.eyecare,
        })

    async def async_set_delayed_turn_off(self, time_period: timedelta):
        """Set delayed turn off.

        The device expects whole minutes, so the period is rounded.
        """
        await self._try_command(
            "Setting the turn off delay failed.",
            self._light.delay_off, round(time_period.total_seconds() / 60))

    async def async_reminder_on(self):
        """Enable the eye fatigue notification."""
        await self._try_command(
            "Turning on the reminder failed.",
            self._light.reminder_on)

    async def async_reminder_off(self):
        """Disable the eye fatigue notification."""
        await self._try_command(
            "Turning off the reminder failed.",
            self._light.reminder_off)

    async def async_night_light_mode_on(self):
        """Turn the smart night light mode on."""
        await self._try_command(
            "Turning on the smart night light mode failed.",
            self._light.smart_night_light_on)

    async def async_night_light_mode_off(self):
        """Turn the smart night light mode off."""
        await self._try_command(
            "Turning off the smart night light mode failed.",
            self._light.smart_night_light_off)

    async def async_eyecare_mode_on(self):
        """Turn the eyecare mode on."""
        await self._try_command(
            "Turning on the eyecare mode failed.",
            self._light.eyecare_on)

    async def async_eyecare_mode_off(self):
        """Turn the eyecare mode off."""
        await self._try_command(
            "Turning off the eyecare mode failed.",
            self._light.eyecare_off)

    @staticmethod
    def delayed_turn_off_timestamp(countdown: int,
                                   current: datetime,
                                   previous: datetime):
        """Update the turn off timestamp only if necessary.

        Returns None when no countdown is active. Otherwise keeps the
        previous timestamp when the freshly computed one deviates from it
        by less than DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES, which avoids
        attribute churn caused by the minute-granularity countdown.
        """
        if countdown is not None and countdown > 0:
            # Anchor at the start of the current minute; the countdown
            # itself is expressed in minutes.
            new = current.replace(second=0, microsecond=0) + \
                timedelta(minutes=countdown)

            if previous is None:
                return new

            lower = timedelta(minutes=-DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
            upper = timedelta(minutes=DELAYED_TURN_OFF_MAX_DEVIATION_MINUTES)
            diff = previous - new
            if lower < diff < upper:
                return previous

            return new

        return None
class XiaomiPhilipsEyecareLampAmbientLight(XiaomiPhilipsAbstractLight):
    """Representation of a Xiaomi Philips Eyecare Lamp Ambient Light."""

    def __init__(self, name, light, model, unique_id):
        """Initialize the light device."""
        ambient_name = '{} Ambient Light'.format(name)
        ambient_unique_id = unique_id
        if ambient_unique_id is not None:
            ambient_unique_id = "{}-{}".format(ambient_unique_id, 'ambient')
        super().__init__(ambient_name, light, model, ambient_unique_id)

    async def async_turn_on(self, **kwargs):
        """Turn the ambient light on, optionally setting brightness."""
        if ATTR_BRIGHTNESS not in kwargs:
            await self._try_command(
                "Turning the ambient light on failed.", self._light.ambient_on)
            return

        brightness = kwargs[ATTR_BRIGHTNESS]
        percent_brightness = ceil(100 * brightness / 255.0)

        _LOGGER.debug(
            "Setting brightness of the ambient light: %s %s%%",
            brightness, percent_brightness)

        result = await self._try_command(
            "Setting brightness of the ambient failed: %s",
            self._light.set_ambient_brightness, percent_brightness)

        if result:
            self._brightness = brightness

    async def async_turn_off(self, **kwargs):
        """Turn the ambient light off."""
        await self._try_command(
            "Turning the ambient light off failed.", self._light.ambient_off)

    async def async_update(self):
        """Poll the device and refresh the cached ambient-light state."""
        from miio import DeviceException
        try:
            status = await self.hass.async_add_executor_job(self._light.status)
        except DeviceException as err:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", err)
            return

        _LOGGER.debug("Got new state: %s", status)
        self._available = True
        self._state = status.ambient
        self._brightness = ceil((255 / 100.0) * status.ambient_brightness)
class XiaomiPhilipsMoonlightLamp(XiaomiPhilipsBulb):
    """Representation of a Xiaomi Philips Zhirui Bedside Lamp."""

    def __init__(self, name, light, model, unique_id):
        """Initialize the light device."""
        super().__init__(name, light, model, unique_id)
        # Last color reported by the device as a (hue, saturation) tuple,
        # converted from the device's RGB value in async_update().
        self._hs_color = None
        # This model has no delayed-turn-off countdown; drop the attribute
        # inherited from the base class.
        self._state_attrs.pop(ATTR_DELAYED_TURN_OFF)
        self._state_attrs.update({
            ATTR_SLEEP_ASSISTANT: None,
            ATTR_SLEEP_OFF_TIME: None,
            ATTR_TOTAL_ASSISTANT_SLEEP_TIME: None,
            ATTR_BRAND_SLEEP: None,
            ATTR_BRAND: None,
        })

    @property
    def min_mireds(self):
        """Return the coldest color_temp that this light supports."""
        return 153

    @property
    def max_mireds(self):
        """Return the warmest color_temp that this light supports."""
        return 588

    @property
    def hs_color(self) -> tuple:
        """Return the hs color value."""
        # NOTE(review): hs_color is reported, but SUPPORT_COLOR is not
        # advertised in supported_features and no color setter exists in
        # this class -- the color is effectively read-only. Confirm that
        # this is intended.
        return self._hs_color

    @property
    def supported_features(self):
        """Return the supported features."""
        return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        try:
            state = await self.hass.async_add_executor_job(self._light.status)
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)
            return

        _LOGGER.debug("Got new state: %s", state)
        self._available = True
        self._state = state.is_on
        # Device brightness is a percentage; Home Assistant uses 0-255.
        self._brightness = ceil((255 / 100.0) * state.brightness)
        # CCT percent -> mireds (inverted span).
        self._color_temp = self.translate(
            state.color_temperature,
            CCT_MIN, CCT_MAX,
            self.max_mireds, self.min_mireds)
        self._hs_color = color.color_RGB_to_hs(*state.rgb)
        self._state_attrs.update({
            ATTR_SCENE: state.scene,
            ATTR_SLEEP_ASSISTANT: state.sleep_assistant,
            ATTR_SLEEP_OFF_TIME: state.sleep_off_time,
            ATTR_TOTAL_ASSISTANT_SLEEP_TIME:
                state.total_assistant_sleep_time,
            ATTR_BRAND_SLEEP: state.brand_sleep,
            ATTR_BRAND: state.brand,
        })

    async def async_set_delayed_turn_off(self, time_period: timedelta):
        """Set delayed turn off. Unsupported."""
        # Intentional no-op: this model has no delay-off feature.
        return
| |
# #######################################################################
# Copyright (c) 2013, Bob Novas, Shinkuro, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# #######################################################################
"""
This is a port of Squery.java (ogud@shinkuro.com) to Python
This code requires dnspython (dnspython.org)
$Id: Squery.py 447 2013-08-22 18:05:56Z bob.novas $
"""
import dns
import dns.resolver
import dns.version
from dns.resolver import Answer, NoMetaqueries, NoNameservers, NoAnswer, NXDOMAIN
import sys
import socket
import time
class Resolver(dns.resolver.Resolver):
    """
    override dnspython query - the stock implementation of query does not return
    an answer on SERVFAIL, it either times out retrying if retry_servfail is True,
    or removes the nameserver from the resolver (brilliant!) if retry_servfail is False.
    This implementation just returns the answer on SERVFAIL, which is what we need.
    Same for REFUSED - return an answer.
    TBD: need to fix the truncation code- this implementation ALWAYS retries truncated w/TCP.
    """

    def __init__(self, filename='/etc/resolv.conf', configure=True):
        # Bug fix: the caller's arguments were previously discarded and
        # the hard-coded defaults always forwarded to the superclass.
        super(Resolver, self).__init__(filename=filename, configure=configure)

    def reset(self):
        """
        Reset the subclass behavior variables.

        These variables change the behavior of the superclass query method. The
        default is the old behavior. Setting a variable to True enables the new
        behavior.

        return_servfail - return an Answer when the resolver returns SERVFAIL
        return_refused - ditto, for REFUSED
        no_tcp_on_tc - don't fallback to TCP if UDP gets TC (truncate) bit
        """
        super(Resolver, self).reset()
        self.return_servfail = False
        self.return_refused = False
        self.no_tcp_on_tc = False

    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True, source_port=0):
        """Query nameservers to find the answer to the question.

        The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
        of the appropriate type, or strings that can be converted into objects
        of the appropriate type. E.g. For I{rdtype} the integer 2 and the
        the string 'NS' both mean to query for records with DNS rdata type NS.

        @param qname: the query name
        @type qname: dns.name.Name object or string
        @param rdtype: the query type
        @type rdtype: int or string
        @param rdclass: the query class
        @type rdclass: int or string
        @param tcp: use TCP to make the query (default is False).
        @type tcp: bool
        @param source: bind to this IP address (defaults to machine default IP).
        @type source: IP address in dotted quad notation
        @param raise_on_no_answer: raise NoAnswer if there's no answer
        (defaults is True).
        @type raise_on_no_answer: bool
        @param source_port: The port from which to send the message.
        The default is 0.
        @type source_port: int
        @rtype: dns.resolver.Answer instance
        @raises Timeout: no answers could be found in the specified lifetime
        @raises NXDOMAIN: the query name does not exist
        @raises YXDOMAIN: the query name is too long after DNAME substitution
        @raises NoAnswer: the response did not contain an answer and
        raise_on_no_answer is True.
        @raises NoNameservers: no non-broken nameservers are available to
        answer the question."""
        # NOTE: `unicode` keeps Python 2 compatibility, which this module
        # targets (see println); under Python 3 only `str` is matched.
        if isinstance(qname, (str, unicode)):
            qname = dns.name.from_text(qname, None)
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        if dns.rdatatype.is_metatype(rdtype):
            raise NoMetaqueries
        if isinstance(rdclass, (str, unicode)):
            rdclass = dns.rdataclass.from_text(rdclass)
        if dns.rdataclass.is_metaclass(rdclass):
            raise NoMetaqueries
        # Build the list of fully-qualified names to try, applying the
        # search list / default domain to relative names.
        qnames_to_try = []
        if qname.is_absolute():
            qnames_to_try.append(qname)
        else:
            if len(qname) > 1:
                qnames_to_try.append(qname.concatenate(dns.name.root))
            if self.search:
                for suffix in self.search:
                    qnames_to_try.append(qname.concatenate(suffix))
            else:
                qnames_to_try.append(qname.concatenate(self.domain))
        all_nxdomain = True
        start = time.time()
        for qname in qnames_to_try:
            if self.cache:
                answer = self.cache.get((qname, rdtype, rdclass))
                if answer is not None:
                    if answer.rrset is None and raise_on_no_answer:
                        raise NoAnswer
                    else:
                        return answer
            request = dns.message.make_query(qname, rdtype, rdclass)
            if self.keyname is not None:
                request.use_tsig(self.keyring, self.keyname,
                                 algorithm=self.keyalgorithm)
            request.use_edns(self.edns, self.ednsflags, self.payload)
            if self.flags is not None:
                request.flags = self.flags
            response = None
            #
            # make a copy of the servers list so we can alter it later.
            #
            nameservers = self.nameservers[:]
            backoff = 0.10
            while response is None:
                if len(nameservers) == 0:
                    raise NoNameservers
                for nameserver in nameservers[:]:
                    timeout = self._compute_timeout(start)
                    try:
                        if tcp:
                            response = dns.query.tcp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source,
                                                     source_port=source_port)
                        else:
                            response = dns.query.udp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source,
                                                     source_port=source_port)
                            if response.flags & dns.flags.TC and not self.no_tcp_on_tc:
                                # Response truncated; retry with TCP.
                                timeout = self._compute_timeout(start)
                                response = dns.query.tcp(request, nameserver,
                                                         timeout, self.port,
                                                         source=source,
                                                         source_port=source_port)
                    except (socket.error, dns.exception.Timeout):
                        #
                        # Communication failure or timeout. Go to the
                        # next server
                        #
                        response = None
                        continue
                    except dns.query.UnexpectedSource:
                        #
                        # Who knows? Keep going.
                        #
                        response = None
                        continue
                    except dns.exception.FormError:
                        #
                        # We don't understand what this server is
                        # saying. Take it out of the mix and
                        # continue.
                        #
                        nameservers.remove(nameserver)
                        response = None
                        continue
                    except EOFError:
                        #
                        # We're using TCP and they hung up on us.
                        # Probably they don't support TCP (though
                        # they're supposed to!). Take it out of the
                        # mix and continue.
                        #
                        nameservers.remove(nameserver)
                        response = None
                        continue
                    rcode = response.rcode()
                    if rcode == dns.rcode.YXDOMAIN:
                        # Bug fix: YXDOMAIN was never imported into this
                        # module, so the bare name raised NameError here.
                        raise dns.resolver.YXDOMAIN
                    if rcode == dns.rcode.NOERROR or \
                       rcode == dns.rcode.NXDOMAIN or \
                       (rcode == dns.rcode.SERVFAIL and self.return_servfail) or \
                       (rcode == dns.rcode.REFUSED and self.return_refused):
                        break
                    #
                    # We got a response, but we're not happy with the
                    # rcode in it. Remove the server from the mix if
                    # the rcode isn't SERVFAIL.
                    #
                    if rcode != dns.rcode.SERVFAIL or not self.retry_servfail:
                        nameservers.remove(nameserver)
                    response = None
                if response is not None:
                    break
                #
                # All nameservers failed!
                #
                if len(nameservers) > 0:
                    #
                    # But we still have servers to try. Sleep a bit
                    # so we don't pound them!
                    #
                    timeout = self._compute_timeout(start)
                    sleep_time = min(timeout, backoff)
                    backoff *= 2
                    time.sleep(sleep_time)
            if response.rcode() == dns.rcode.NXDOMAIN:
                # Try the next candidate name from the search list.
                continue
            all_nxdomain = False
            break
        if all_nxdomain:
            raise NXDOMAIN
        answer = Answer(qname, rdtype, rdclass, response,
                        raise_on_no_answer)
        if self.cache:
            self.cache.put((qname, rdtype, rdclass), answer)
        return answer
class Squery(object):
    """
    Helper for issuing diagnostic DNS queries against a specific resolver.
    """

    # Sentinel meaning "no TTL recorded yet".
    TTL = -1

    def __init__(self):
        dnspython_version = dns.version.version
        # Bug fix: the original compared version *strings*, so e.g.
        # "1.9.0" >= "1.11.0" lexically and old versions slipped through.
        # Compare the numeric components instead.
        if (dns.version.MAJOR, dns.version.MINOR) < (1, 11):
            raise ValueError("You have dnspython %s. You need dnspython 1.11.0 or better"
                             % (dnspython_version, ))
        self.saw_timeout = False
        self.set_zone("submit.dnssecready.net.")
        self.ignore_truncation = True
        self.ttl = 0

    @classmethod
    def println(cls, o):
        """
        Print a debug message
        @param o: message to print
        @type o: L{string}
        """
        # Function-call form works on both Python 2 and Python 3
        # (the bare `print o` statement was Python 2 only).
        print(o)

    def set_ttl(self, val):
        """
        Set the Time to Live value of a query
        @param val: time to live value
        @type val: L{float}
        """
        self.ttl = val

    def get_ttl(self):
        """
        Get the Time to Live value of a query
        @rtype: L{float}
        @return: Time to live value
        """
        return self.ttl

    def set_ignoreTruncation(self, val):
        """Set whether truncated (TC) responses are ignored."""
        self.ignore_truncation = val

    def get_ignoreTruncation(self):
        """Return whether truncated (TC) responses are ignored."""
        return self.ignore_truncation

    def query_timeout(self):
        """
        Query whether a timeout occurred.
        @rtype: L{boolean}
        @return: Timeout exception occurred in a query on this object
        """
        return self.saw_timeout

    def set_zone(self, zone_name):
        """
        Set the zone name and the getting_address
        @param zone_name: the zone name
        @type zone_name: L{string}
        """
        self.zone_name = zone_name
        self.getting_address = "whatsmyip." + self.zone_name

    def get_zone(self):
        """
        Get the zone_name
        @rtype: L{string}
        @return: The zone name
        """
        return self.zone_name

    @classmethod
    def Str_to_Name(cls, name):
        """
        Translate a string to a dns.Name object
        @param name: the name to translate
        @type name: L{string}
        """
        my_name = None
        try:
            my_name = dns.name.from_text(name, dns.name.root)
        except Exception:
            # Deliberate best-effort: report and return None.
            cls.println("Name error rrsig_check: %s" % (name, ))
        return my_name

    @classmethod
    def get_resolver(cls, resolver, debug=False, tcp=False):
        """
        @param resolver: The resolver to query, as a dotted IPv4 address
        @type resolver: L{string}
        @param debug: if True, print some debug output
        @type debug: L{boolean}
        @rtype: L{dns.resolver.Resolver}
        @return: an instance of a resolver object
        """
        try:
            rslvr = Resolver()
            rslvr.nameservers = [resolver]
            # Tack a instance variable onto the Resolver instance
            # that says whether to use TCP or not. If use_tcp is
            # True, then the queries will be over TCP only.
            rslvr.use_tcp = tcp
            # Set the alternate behavior flags to return an Answer
            # on SERVFAIL and REFUSED.
            rslvr.return_servfail = True
            rslvr.return_refused = True
            # change the timeout (lifetime) to 10 seconds from the default 30.
            rslvr.lifetime = 10.0
            if debug:
                cls.println("Resolver=%s" % (rslvr, ))
            return rslvr
        except Exception:
            # Deliberate best-effort: any setup failure yields None.
            return None

    def make_query(self, domain, rdatatype, resolver, debug=False, noRec=False):
        """
        @param domain: domain name to issue query against
        @type domain: L{string}
        @param rdatatype: type of RRSet queried for
        @type rdatatype: L{dns.rdatatype}
        @param resolver: resolver to ask to answer the query
        @type resolver: L{dns.resolver.Resolver}
        @param debug: if True, print some debug output
        @type debug: L{boolean}
        @param noRec: No Recursion (clear the RD flag in the query)
        @type noRec: L{Boolean}
        """
        name = self.Str_to_Name(domain)
        if not name:
            return name
        if noRec:
            # Bug fix: the computed flags (with RD cleared) were thrown
            # away and `dns.flags.RD` was passed instead -- the exact
            # opposite of "no recursion". Also guard against the
            # resolver's flags defaulting to None.
            flags = resolver.flags if resolver.flags is not None else dns.flags.RD
            flags &= ~dns.flags.RD
            resolver.set_flags(flags)
        ans = None
        try:
            ans = resolver.query(name, rdatatype, rdclass=dns.rdataclass.IN,
                                 tcp=resolver.use_tcp, raise_on_no_answer=False)
            if debug:
                self.println("%s %s: %s" % (domain, dns.rdatatype.to_text(rdatatype), ans.response.to_text(), ))
        except dns.exception.Timeout:
            self.saw_timeout = True
            if debug:
                self.println("Exception: timeout")
        except dns.resolver.NoAnswer:
            if debug:
                self.println("Exception: %s" % (sys.exc_info()[0], ))
        except Exception:
            # Deliberate best-effort: any other failure returns None.
            if debug:
                self.println("Exception: %s" % (sys.exc_info()[0], ))
        return ans

    def addr_lookup(self, resolver, name, debug=False):
        """
        Lookup the address of name using a given resolver
        @param resolver: resolver to ask to answer the query
        @type resolver: L{string}
        @param name: the name of the resolver to lookup
        @type name: L{string}
        @param debug: if True, print some debug output
        @type debug: L{boolean}
        """
        # NOTE: `basestring` keeps Python 2 compatibility, which this
        # module targets; under Python 3 this would need `str`.
        if not isinstance(resolver, basestring):
            raise ValueError("resolver should be a dotted ip address string")
        reslvr = self.get_resolver(resolver, debug=debug)
        if not reslvr:
            return reslvr
        ans = self.make_query(name, dns.rdatatype.A, reslvr, debug)
        if ans and ans.response and ans.response.answer:
            rdataset = ans.response.answer[0].to_rdataset()
            if rdataset:
                return rdataset[0].to_text()
        return None

    def Forged_Address(self, resolver, debug=False):
        """
        Determine if a resolver is returning a forged address by testing
        against the "well known" address of fixedaddress.dnssecready.net
        @param resolver: the IPv4 dotted address of the resolver to use
        @type resolver: L{string}
        @param debug: if True, print some debug output
        @type debug: L{boolean}
        """
        tst = self.addr_lookup(resolver, "fixedaddress.dnssecready.net.", debug)
        return not (tst == "127.252.253.254")
| |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the log_process.py module."""
import datetime
import os
import re
import time
from unittest import mock
from gazoo_device import errors
from gazoo_device.capabilities import event_parser_default
from gazoo_device.switchboard import log_process
from gazoo_device.switchboard import switchboard_process
from gazoo_device.tests.unit_tests.utils import unit_test_case
from gazoo_device.utility import multiprocessing_utils
# Canned log-line fixtures shared by the tests below. Each carries the
# "<timestamp> GDM-n:" prefix that log_process expects on device log lines.
_NEW_LOG_FILE_MESSAGE = "<2017-07-01 12:23:43.123456> GDM-0: {}".format(
    log_process.NEW_LOG_FILE_MESSAGE)
_FULL_LOG_MESSAGE = "<2017-07-01 12:23:43.123456> GDM-0: Full log line\n"
# No trailing newline and trailing U+FFFD replacement characters: exercises
# partial-line handling.
_PARTIAL_LOG_MESSAGE = ("<2017-07-01 12:23:43.123456> GDM-1: my-prompt> "
                        "\ufffd\ufffd")
_ROTATE_LOG_MESSAGE = "<2017-07-01 12:23:43.123456> GDM-0: {}".format(
    log_process.ROTATE_LOG_MESSAGE)
_SHORT_LOG_MESSAGE = "<2017-07-01 12:23:43.123456> GDM-0: Short line\n"
_EVENT_LOG_MESSAGE = ("<2017-07-01 12:23:43.123456> GDM-0: [APPL] Some "
                      "non-existent message with non-ascii \xf7\n")
# Uses "\r" instead of "\n" terminators: exercises line-return handling.
_EVENT_LINE_RETURN_LOG_MESSAGE = (
    "<2017-07-01 12:23:43.123456> GDM-0: \r[APPL] "
    "Some non-existent message with extra line return chars\r")
# Seconds to wait for asynchronous file writes in get_file_size().
_WRITE_TIMEOUT = 1
# Convenience alias used throughout the tests.
wait_for_queue_writes = switchboard_process.wait_for_queue_writes
def get_file_size(file_path, size=0, timeout=None):
    """Wait for file_path to exist and exceed `size` bytes, then return its size.

    Polls every millisecond until the file exists and its size is greater
    than `size`, or until `timeout` seconds elapse.

    Args:
        file_path (str): path of the file to watch.
        size (int): return once the file grows beyond this many bytes.
        timeout (float): seconds to wait; defaults to _WRITE_TIMEOUT.

    Returns:
        int: the last observed file size.

    Raises:
        OSError: if the file still does not exist when the wait ends.
    """
    # Late-bind the default so changes to _WRITE_TIMEOUT take effect.
    if timeout is None:
        timeout = _WRITE_TIMEOUT
    end_time = time.time() + timeout
    while not os.path.isfile(file_path) and time.time() < end_time:
        time.sleep(0.001)
    filesize = os.path.getsize(file_path)
    while filesize <= size and time.time() < end_time:
        time.sleep(0.001)
        filesize = os.path.getsize(file_path)
    # Bug fix: return the size the wait loop observed instead of stat-ing
    # the file a third time (which could race with a concurrent writer).
    return filesize
class LogFilterProcessTests(unit_test_case.MultiprocessingTestCase):
def setUp(self):
    """Create the mock parser and command queue used by every test."""
    super().setUp()
    self.command_queue = multiprocessing_utils.get_context().Queue()
    self.mock_parser = mock.MagicMock(
        spec=event_parser_default.EventParserDefault)
def tearDown(self):
    """Close the unit under test and release shared resources."""
    if hasattr(self, "uut"):
        self.uut._post_run_hook()  # close any open files
        del self.uut
    del self.command_queue  # Release shared memory file descriptors.
    super().tearDown()
def test_000_log_filter_construct_destruct(self):
    """Test LogFilterProcess constructing and destructing raises no errors."""
    parser_obj = event_parser_default.EventParserDefault(
        [], event_file_path="/foo.txt", device_name="device-1234")
    log_path = os.path.join(self.artifacts_directory,
                            self._testMethodName + ".txt")
    self.uut = log_process.LogFilterProcess(
        "fake_device", self.exception_queue, self.command_queue, parser_obj,
        log_path)
    # A freshly constructed process must be neither started nor running.
    self.assertFalse(self.uut.is_started(),
                     "Expected process not started, found started")
    self.assertFalse(self.uut.is_running(),
                     "Expected process to not running, found running")
def test_001_log_filter_cant_open_log_file(self):
    """Test log filter unable to open log file."""
    parser_obj = event_parser_default.EventParserDefault(
        [], event_file_path="/foo.txt", device_name="device-1234")
    log_path = os.path.join(self.artifacts_directory,
                            self._testMethodName + ".txt")
    event_path = log_process.get_event_filename(log_path)
    self.uut = log_process.LogFilterProcess(
        "fake_device", self.exception_queue, self.command_queue, parser_obj,
        log_path)
    # The log file was never created, so one work cycle must not produce
    # an event file.
    self.uut._pre_run_hook()
    self.uut._do_work()
    self.uut._post_run_hook()
    self.assertFalse(
        os.path.exists(event_path),
        "Expected event file {} to not exist, but it exists".format(event_path))
def test_002_log_filter_creates_event_file(self):
    """Test log filter creates event file after opening log file."""
    filter_file = os.path.join(self.TEST_FILTER_DIR,
                               "optional_description.json")
    parser_obj = event_parser_default.EventParserDefault(
        [filter_file], event_file_path="/foo.txt", device_name="device-1234")
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    event_path = log_process.get_event_filename(log_path)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            log_path)
    # Pre-populate the log so the first work cycles have data to process.
    self._append_to_log_file(log_path)
    self.uut._pre_run_hook()
    # Each _do_work call performs one step; the order below matters.
    self.uut._do_work()  # opens log file
    self.uut._do_work()  # writes first event
    filesize = get_file_size(event_path)
    self.uut._post_run_hook()
    self.assertTrue(
        os.path.exists(event_path),
        "Expected event file {} to exist, but it doesn't exist".format(
            event_path))
    self.assertGreater(
        filesize, 0,
        "Expected event file {} size > 0 found {}".format(event_path, filesize))
def test_003_log_filter_tails_log_file(self):
    """Test log filter tails log file for new lines added."""
    filter_file = os.path.join(self.TEST_FILTER_DIR,
                               "optional_description.json")
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    event_path = log_process.get_event_filename(log_path)
    parser_obj = event_parser_default.EventParserDefault(
        [filter_file], event_file_path=event_path, device_name="device-1234")
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            log_path)
    self._append_to_log_file(log_path)
    self.uut._pre_run_hook()
    # Each _do_work call performs one step; the order below matters.
    self.uut._do_work()  # opens log file
    self.uut._do_work()  # writes first event
    filesize1 = get_file_size(event_path)
    self.uut._do_work()  # reads no log line
    # Append another line and verify the filter picks it up (tailing).
    self._append_to_log_file(log_path)
    self.uut._do_work()  # writes second event
    filesize2 = get_file_size(event_path, size=filesize1)
    self.uut._post_run_hook()
    self.assertTrue(
        os.path.exists(event_path),
        "Expected event file {} to exist, but it doesn't exist".format(
            event_path))
    self.assertGreater(
        filesize2, filesize1,
        "Expected event file {} to grow from size {}, but found {}".format(
            event_path, filesize1, filesize2))
def test_004_log_filter_handles_line_return_chars(self):
    """Test log filter handles lines terminated with "\\r" characters."""
    filter_file = os.path.join(self.TEST_FILTER_DIR,
                               "optional_description.json")
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    event_path = log_process.get_event_filename(log_path)
    parser_obj = event_parser_default.EventParserDefault(
        [filter_file], event_file_path=event_path, device_name="device-1234")
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            log_path)
    # This fixture ends in "\r" without "\n", so it is an incomplete line.
    self._append_to_log_file(log_path, log_line=_EVENT_LINE_RETURN_LOG_MESSAGE)
    self.uut._pre_run_hook()
    # Each _do_work call performs one step; the order below matters.
    self.uut._do_work()  # opens log file
    self.uut._do_work()  # skips writing event due to missing \n
    filesize1 = get_file_size(event_path)
    self.uut._do_work()  # reads no log line
    # Completing the line with "\r\n" should now produce the event.
    self._append_to_log_file(log_path, log_line="\r\n")
    self.uut._do_work()  # writes first event
    filesize2 = get_file_size(event_path, size=filesize1)
    self.uut._post_run_hook()
    self.assertTrue(
        os.path.exists(event_path),
        "Expected event file {} to exist, but it doesn't exist".format(
            event_path))
    self.assertEqual(
        0, filesize1, "Expected event file {} to be size 0 but found {}".format(
            event_path, filesize1))
    self.assertLess(
        filesize1, filesize2,
        "Expected event file {} to grow from size {}, but found {}".format(
            event_path, filesize1, filesize2))
def test_100_log_filter_rejects_invalid_command(self):
    """Test LogFilterProcess rejects invalid command."""
    parser_obj = event_parser_default.EventParserDefault(
        [], event_file_path="/foo.txt", device_name="device-1234")
    log_path = os.path.join(self.artifacts_directory,
                            self._testMethodName + ".txt")
    self.uut = log_process.LogFilterProcess(
        "fake_device", self.exception_queue, self.command_queue, parser_obj,
        log_path)
    # Queue an unknown command and expect the work loop to reject it.
    self.command_queue.put(("invalid cmd", None))
    wait_for_queue_writes(self.command_queue)
    self.uut._pre_run_hook()
    with self.assertRaisesRegex(RuntimeError, "received an unknown command"):
        self.uut._do_work()
    self.uut._post_run_hook()
def test_101_log_filter_accepts_valid_common_commands(self):
    """Test LogFilterProcess send_command accepts valid common commands."""
    filters = []
    parser_obj = event_parser_default.EventParserDefault(
        filters, event_file_path="/foo.txt", device_name="device-1234")
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            log_path)
    # Each common command must be accepted and land on the command queue
    # unchanged.
    for command in log_process._VALID_COMMON_COMMANDS:
        self.uut.send_command(command)
        wait_for_queue_writes(self.command_queue)
        self.assertFalse(
            self.command_queue.empty(),
            "Expected command queue {} to not be empty".format(
                self.command_queue))
        command_message = self.command_queue.get()
        self.assertEqual(
            command, command_message[0],
            "Expected command {} found {}".format(command, command_message[0]))
def test_120_load_filter_file_returns_error(self):
    """Verifies ParserError seen by method _do_work."""
    filter_file = os.path.join(self.TEST_FILTER_DIR,
                               "optional_description.json")
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue,
                                            self.mock_parser, log_path)
    self._append_to_log_file(log_path)
    # Make the mocked parser fail when the new filter is loaded.
    self.mock_parser.load_filter_file.side_effect = errors.ParserError(
        "Adding new filter failed")
    with self.assertRaisesRegex(errors.ParserError, "Adding new filter failed"):
        self.uut._pre_run_hook()
        self.uut.send_command(log_process.CMD_ADD_NEW_FILTER, filter_file)
        wait_for_queue_writes(self.command_queue)
        self.uut._do_work()  # loads filter file
def test_121_load_filter_file_adds_new_filter(self):
"""Verifies can add new filter file."""
filter_file = os.path.join(self.TEST_FILTER_DIR,
"optional_description.json")
log_file_name = self._testMethodName + ".txt"
log_path = os.path.join(self.artifacts_directory, log_file_name)
self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
self.command_queue,
self.mock_parser, log_path)
self._append_to_log_file(log_path)
self.uut._pre_run_hook()
self.uut.send_command(log_process.CMD_ADD_NEW_FILTER, filter_file)
wait_for_queue_writes(self.command_queue)
self.uut._do_work() # loads filter file
self.uut._post_run_hook()
self.mock_parser.load_filter_file.assert_called_once_with(filter_file)
  def test_200_log_filter_uses_new_log_file(self):
    """Test switching LogFilterProcess to use new log file specified."""
    filters = []
    parser_obj = event_parser_default.EventParserDefault(
        filters, event_file_path="/foo.txt", device_name="device-1234")
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-old.txt")
    new_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-new.txt")
    old_event_path = log_process.get_event_filename(old_log_path)
    new_event_path = log_process.get_event_filename(new_log_path)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            old_log_path)
    self._append_to_log_file(old_log_path)
    self._append_to_log_file(new_log_path)
    self.uut._pre_run_hook()
    self.uut._do_work(
    )  # Opens old log and event files and processes first line
    # Queue the switch command, then append the new-log-file marker line that
    # the filter reads to know the old log is finished.
    switchboard_process.put_message(
        self.command_queue, (log_process.CMD_NEW_LOG_FILE, new_log_path))
    wait_for_queue_writes(self.command_queue)
    self._append_to_log_file(old_log_path, log_line=_NEW_LOG_FILE_MESSAGE)
    self.uut._do_work()  # Process next line in old log file and closes it
    # Old event file exists; the new one must not until the filter actually
    # opens the new log on the next work iteration.
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertFalse(
        os.path.exists(new_event_path),
        "Expected {} to not exist".format(new_event_path))
    self.uut._do_work()  # Opens new log and event files
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertTrue(
        os.path.exists(new_event_path),
        "Expected {} to exist".format(new_event_path))
    self.uut._post_run_hook()
  def test_201_log_filter_ignores_extra_new_log_file_message(self):
    """Test LogFilterProcess ignores spurious new log file message."""
    filters = []
    parser_obj = event_parser_default.EventParserDefault(
        filters, event_file_path="/foo.txt", device_name="device-1234")
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-old.txt")
    new_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-new.txt")
    old_event_path = log_process.get_event_filename(old_log_path)
    new_event_path = log_process.get_event_filename(new_log_path)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            old_log_path)
    self._append_to_log_file(old_log_path)
    self._append_to_log_file(new_log_path)
    self.uut._pre_run_hook()
    self.uut._do_work(
    )  # Opens old log and event files and processes first line
    # Spurious marker: no CMD_NEW_LOG_FILE command was queued, so the filter
    # must keep using the old log and never open the new event file.
    self._append_to_log_file(old_log_path, log_line=_NEW_LOG_FILE_MESSAGE)
    self.uut._do_work()  # Process next line in and tries to switch to next log
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertFalse(
        os.path.exists(new_event_path),
        "Expected {} to not exist".format(new_event_path))
    # Now perform a legitimate switch: command first, then the marker line.
    switchboard_process.put_message(
        self.command_queue, (log_process.CMD_NEW_LOG_FILE, new_log_path))
    wait_for_queue_writes(self.command_queue)
    self._append_to_log_file(old_log_path, log_line=_NEW_LOG_FILE_MESSAGE)
    self.uut._do_work()  # Process next line in old log file and closes it
    self.uut._do_work(
    )  # Opens new log and event files and processes first line
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertTrue(
        os.path.exists(new_event_path),
        "Expected {} to exist".format(new_event_path))
    self.uut._post_run_hook()
  def test_210_log_filter_rotates_log_file_only(self):
    """Test LogFilterProcess rotates to next log file only."""
    filters = []
    parser_obj = event_parser_default.EventParserDefault(
        filters, event_file_path="/foo.txt", device_name="device-1234")
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device.txt")
    next_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                 "fake-device.00001.txt")
    old_event_path = log_process.get_event_filename(old_log_path)
    next_event_path = log_process.get_event_filename(next_log_path)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            old_log_path)
    self._append_to_log_file(old_log_path)
    self._append_to_log_file(next_log_path)
    self.uut._pre_run_hook()
    self.uut._do_work(
    )  # Opens old log and event files and processes first line
    # The rotate marker tells the filter to continue in fake-device.00001.txt.
    self._append_to_log_file(old_log_path, log_line=_ROTATE_LOG_MESSAGE)
    self.uut._do_work()  # Process next line in old log file and closes it
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertFalse(
        os.path.exists(next_event_path),
        "Expected {} to not exist".format(next_event_path))
    self.uut._do_work(
    )  # Opens next log and event files and processes first line
    # After rotation the original event file is still the only one: rotation
    # switches the log being read but does not create a new event file.
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertFalse(
        os.path.exists(next_event_path),
        "Expected {} to not exist".format(next_event_path))
    self.uut._post_run_hook()
  def test_211_log_filter_new_log_message_doesnt_trigger_rotate(self):
    """Test rotate log message appears after new log file command."""
    filters = []
    parser_obj = event_parser_default.EventParserDefault(
        filters, event_file_path="/foo.txt", device_name="device-1234")
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-old.txt")
    old_event_path = log_process.get_event_filename(old_log_path)
    new_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-new.txt")
    new_event_path = log_process.get_event_filename(new_log_path)
    # Rotation counterparts of the old and new logs; neither should ever get
    # an event file in this scenario.
    next_log_path1 = os.path.join(self.artifacts_directory,
                                  self._testMethodName,
                                  "fake-device-old.00001.txt")
    next_event_path1 = log_process.get_event_filename(next_log_path1)
    next_log_path2 = os.path.join(self.artifacts_directory,
                                  self._testMethodName,
                                  "fake-device-new.00001.txt")
    next_event_path2 = log_process.get_event_filename(next_log_path2)
    self.uut = log_process.LogFilterProcess("fake_device", self.exception_queue,
                                            self.command_queue, parser_obj,
                                            old_log_path)
    self._append_to_log_file(old_log_path)
    self._append_to_log_file(next_log_path1)
    self._append_to_log_file(new_log_path)
    self.uut._pre_run_hook()
    self.uut._do_work(
    )  # Opens old log and event files and processes first line
    # Queue the switch to the new log, then append a rotate marker to the old
    # log: the rotation happens first, the switch completes afterwards.
    switchboard_process.put_message(
        self.command_queue, (log_process.CMD_NEW_LOG_FILE, new_log_path))
    wait_for_queue_writes(self.command_queue)
    self._append_to_log_file(old_log_path, log_line=_ROTATE_LOG_MESSAGE)
    self.uut._do_work(
    )  # Process next line in old log file and rotates log file
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertFalse(
        os.path.exists(new_event_path),
        "Expected {} to not exist".format(new_event_path))
    self.assertFalse(
        os.path.exists(next_event_path1),
        "Expected {} to not exist".format(next_event_path1))
    self.assertFalse(
        os.path.exists(next_event_path2),
        "Expected {} to not exist".format(next_event_path2))
    self._append_to_log_file(next_log_path1, log_line=_NEW_LOG_FILE_MESSAGE)
    self.uut._do_work(
    )  # Process next line in next log file 1 and opens new log file
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertFalse(
        os.path.exists(new_event_path),
        "Expected {} to not exist".format(new_event_path))
    self.assertFalse(
        os.path.exists(next_event_path1),
        "Expected {} to not exist".format(next_event_path1))
    self.assertFalse(
        os.path.exists(next_event_path2),
        "Expected {} to not exist".format(next_event_path2))
    self.uut._do_work()  # Keeps reading from new log file
    self.assertTrue(
        os.path.exists(old_event_path),
        "Expected {} to exist".format(old_event_path))
    self.assertTrue(
        os.path.exists(new_event_path),
        "Expected {} to exist".format(new_event_path))
    self.assertFalse(
        os.path.exists(next_event_path1),
        "Expected {} to not exist".format(next_event_path1))
    self.assertFalse(
        os.path.exists(next_event_path2),
        "Expected {} to not exist".format(next_event_path2))
    self.uut._post_run_hook()
def _append_to_log_file(self, log_path, log_line=_EVENT_LOG_MESSAGE):
if not os.path.exists(os.path.dirname(log_path)):
os.makedirs(os.path.dirname(log_path))
with open(log_path, "a", encoding="utf-8") as log_file:
log_file.write(log_line)
log_file.flush()
class LogWriterProcessTests(unit_test_case.MultiprocessingTestCase):
  """Unit tests for log_process.LogWriterProcess and its helpers."""

  def setUp(self):
    """Creates the command and log queues used by each test."""
    super().setUp()
    self.command_queue = multiprocessing_utils.get_context().Queue()
    self.log_queue = multiprocessing_utils.get_context().Queue()

  def tearDown(self):
    """Releases the unit under test and its queues."""
    if hasattr(self, "uut"):
      del self.uut
    del self.command_queue  # Release shared memory file descriptors.
    del self.log_queue
    super().tearDown()

  def test_000_log_writer_construct_destruct(self):
    """Test LogWriterProcess constructing and destructing raises no errors."""
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            log_path)
    self.assertFalse(self.uut.is_started(),
                     "Expected process not started, found started")
    self.assertFalse(self.uut.is_running(),
                     "Expected process to not running, found running")

  def test_001_log_message_adds_log_timestamp(self):
    """Test log_message method adds host system timestamp."""
    port = 0
    raw_log_line = "my log line"
    log_process.log_message(self.log_queue, raw_log_line, port)
    wait_for_queue_writes(self.log_queue)
    log_line = self.log_queue.get()
    # Log line format check
    self.assertIsInstance(
        log_line, str,
        "Expected log line to be unicode, found {}".format(type(log_line)))
    # System timestamp checks
    timestamp_length = log_process.HOST_TIMESTAMP_LENGTH
    host_timestamp = datetime.datetime.strptime(
        log_line[:timestamp_length], log_process.HOST_TIMESTAMP_FORMAT)
    self.assertIsInstance(
        host_timestamp, datetime.datetime,
        "Expected datetime found {}".format(type(host_timestamp)))
    log_line_without_timestamp = log_line[timestamp_length:]
    # Log line header checks
    match = re.search(log_process.LOG_LINE_HEADER_FORMAT,
                      log_line_without_timestamp)
    self.assertIsNotNone(
        match, "Expected log line header format {} to match {}".format(
            log_process.LOG_LINE_HEADER_FORMAT, log_line_without_timestamp))
    self.assertEqual(
        str(port), match.group(1),
        "Expected log line source port {} to equal {} from line header".format(
            port, match.group(1)))
    self.assertEqual(
        raw_log_line, match.group(2),
        "Expected raw log line {} to equal {} from line header".format(
            raw_log_line, match.group(2)))

  def test_002_get_next_log_filename_no_counter(self):
    """Test get_next_log_filename handles file with no log counter."""
    current_log_filename = (
        "/tmp/TestSuite.prefix-device-1234-20180912-111222.txt")
    expected_log_filename = (
        "/tmp/TestSuite.prefix-device-1234-20180912-111222.00001.txt")
    log_filename = log_process.get_next_log_filename(current_log_filename)
    self.assertEqual(
        expected_log_filename, log_filename,
        "Expected log filename {} found {}".format(expected_log_filename,
                                                   log_filename))

  def test_003_get_next_log_filename_with_counter(self):
    """Test get_next_log_filename handles file with log counter."""
    current_log_filename = (
        "/tmp/TestSuite.prefix-device-0203-20180912-111222.99998.txt")
    expected_log_filename = (
        "/tmp/TestSuite.prefix-device-0203-20180912-111222.99999.txt")
    log_filename = log_process.get_next_log_filename(current_log_filename)
    self.assertEqual(
        expected_log_filename, log_filename,
        "Expected log filename {} found {}".format(expected_log_filename,
                                                   log_filename))

  def test_010_log_writer_writes_full_log_line(self):
    """Test writing full log line to file."""
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            log_path)
    switchboard_process.put_message(self.log_queue, _FULL_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._pre_run_hook()
    self.uut._do_work()
    self.uut._post_run_hook()
    self._verify_log_file_and_lines(log_path, 1)

  def test_011_log_writer_writes_partial_log_line(self):
    """Test writing partial log lines with unicode characters to file."""
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            log_path)
    switchboard_process.put_message(self.log_queue, _PARTIAL_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._pre_run_hook()
    self.uut._do_work()
    self.uut._post_run_hook()
    lines = self._verify_log_file_and_lines(log_path, 1)
    self.assertIn(
        "[NO EOL]", lines[0],
        "Expected '[NO EOL]' at end of partial line found {!r}".format(
            lines[0]))

  def test_012_log_writer_writes_partial_and_full_log_lines(self):
    """Test writing partial and full log lines with unicode characters."""
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            log_path)
    switchboard_process.put_message(self.log_queue, _PARTIAL_LOG_MESSAGE)
    switchboard_process.put_message(self.log_queue, _FULL_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._pre_run_hook()
    self.uut._do_work()
    self.uut._do_work()
    self.uut._post_run_hook()
    self._verify_log_file_and_lines(log_path, 2)

  def test_100_log_writer_rejects_invalid_command(self):
    """Test LogWriterProcess rejects invalid command."""
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            log_path)
    self.command_queue.put(("invalid cmd", None))
    wait_for_queue_writes(self.command_queue)
    self.uut._pre_run_hook()
    with self.assertRaisesRegex(RuntimeError, "received an unknown command"):
      self.uut._do_work()
    self.uut._post_run_hook()

  def test_101_log_writer_accepts_valid_common_commands(self):
    """Test LogWriterProcess send_command accepts valid common commands."""
    log_file_name = self._testMethodName + ".txt"
    log_path = os.path.join(self.artifacts_directory, log_file_name)
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            log_path)
    for command in log_process._VALID_WRITER_COMMANDS:
      self.uut.send_command(command)
      wait_for_queue_writes(self.command_queue)
      self.assertFalse(self.command_queue.empty(),
                       "Expected command queue to not be empty")
      command_message = self.command_queue.get()
      self.assertEqual(
          command, command_message[0],
          "Expected command {} found {}".format(command, command_message[0]))

  def test_200_log_writer_uses_new_log_file(self):
    """Test switching LogWriterProcess to use new log file specified."""
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-old.txt")
    new_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-new.txt")
    self.uut = log_process.LogWriterProcess("fake_device", self.exception_queue,
                                            self.command_queue, self.log_queue,
                                            old_log_path)
    switchboard_process.put_message(
        self.command_queue, (log_process.CMD_NEW_LOG_FILE, new_log_path))
    wait_for_queue_writes(self.command_queue)
    self.uut._pre_run_hook()  # Open old log file
    self.uut._do_work()  # Process new log file command and opens new log file
    switchboard_process.put_message(self.log_queue, _FULL_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._do_work()  # Writes full log message to new log file
    self.uut._post_run_hook()
    old_lines = self._verify_log_file_and_lines(old_log_path, 1)
    new_lines = self._verify_log_file_and_lines(new_log_path, 1)
    # BUG FIX: the failure message previously read "Expected {!r} == {!r}"
    # which contradicted the assertNotEqual check.
    self.assertNotEqual(old_lines, new_lines,
                        "Expected {!r} to not equal {!r}".format(
                            old_lines, new_lines))

  def test_210_log_writer_rotates_log_file(self):
    """Test LogWriterProcess rotating to new log file."""
    max_log_size = len(_FULL_LOG_MESSAGE)
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device.txt")
    new_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device.00001.txt")
    next_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                 "fake-device.00002.txt")
    self.uut = log_process.LogWriterProcess(
        "fake_device",
        self.exception_queue,
        self.command_queue,
        self.log_queue,
        old_log_path,
        max_log_size=max_log_size)
    switchboard_process.put_message(self.log_queue, _FULL_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._pre_run_hook()
    self.uut._do_work()
    switchboard_process.put_message(self.log_queue, _SHORT_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._do_work()
    self.uut._post_run_hook()
    old_lines = self._verify_log_file_and_lines(old_log_path, 2)
    self._verify_log_file_and_lines(new_log_path, 1)
    self.assertIn(
        log_process.ROTATE_LOG_MESSAGE, old_lines[1],
        "Expected {} log message in old log file found {!r}".format(
            log_process.ROTATE_LOG_MESSAGE, old_lines))
    self.assertTrue(
        os.path.exists(new_log_path),
        "Expected log rotation to {}".format(new_log_path))
    self.assertFalse(
        os.path.exists(next_log_path),
        "Expected no log rotation to {}".format(next_log_path))

  def test_211_log_writer_new_log_command_handled_before_log_rotate(self):
    """Test new log message could but doesn't trigger rotate log."""
    max_log_size = len(_NEW_LOG_FILE_MESSAGE)
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-old.txt")
    new_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-new.txt")
    next_log_path1 = os.path.join(self.artifacts_directory,
                                  self._testMethodName,
                                  "fake-device-old.00001.txt")
    next_log_path2 = os.path.join(self.artifacts_directory,
                                  self._testMethodName,
                                  "fake-device-new.00001.txt")
    self.uut = log_process.LogWriterProcess(
        "fake_device",
        self.exception_queue,
        self.command_queue,
        self.log_queue,
        old_log_path,
        max_log_size=max_log_size)
    switchboard_process.put_message(
        self.command_queue, (log_process.CMD_NEW_LOG_FILE, new_log_path))
    wait_for_queue_writes(self.command_queue)
    self.uut._pre_run_hook()  # Opens old log file
    self.uut._do_work()  # Process new log file command and opens new log file
    self.uut._do_work()  # Allows for possible log rotation issue
    self.uut._post_run_hook()
    old_lines = self._verify_log_file_and_lines(old_log_path, 1)
    self._verify_log_file_and_lines(new_log_path, 0)
    self.assertIn(
        log_process.NEW_LOG_FILE_MESSAGE, old_lines[0],
        "Expected {} log message in old log file found {!r}".format(
            log_process.NEW_LOG_FILE_MESSAGE, old_lines))
    self.assertTrue(
        os.path.exists(new_log_path),
        "Expected new log file of {}".format(new_log_path))
    self.assertFalse(
        os.path.exists(next_log_path1),
        "Expected no log rotation to {}".format(next_log_path1))
    self.assertFalse(
        os.path.exists(next_log_path2),
        "Expected no log rotation to {}".format(next_log_path2))

  def test_212_log_writer_can_change_max_log_size(self):
    """Test LogWriterProcess can change max_log_size."""
    max_log_size = 0  # 0 disables log rotation.
    old_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                "fake-device-old.txt")
    next_log_path = os.path.join(self.artifacts_directory, self._testMethodName,
                                 "fake-device-old.00001.txt")
    self.uut = log_process.LogWriterProcess(
        "fake_device",
        self.exception_queue,
        self.command_queue,
        self.log_queue,
        old_log_path,
        max_log_size=len(_FULL_LOG_MESSAGE))
    switchboard_process.put_message(
        self.command_queue, (log_process.CMD_MAX_LOG_SIZE, max_log_size))
    wait_for_queue_writes(self.command_queue)
    self.uut._pre_run_hook()  # Opens old log file
    self.uut._do_work()  # Process max_log_size command and opens new log file
    switchboard_process.put_message(self.log_queue, _FULL_LOG_MESSAGE)
    wait_for_queue_writes(self.log_queue)
    self.uut._do_work()  # Allows for possible log rotation issue
    self.uut._post_run_hook()
    old_lines = self._verify_log_file_and_lines(old_log_path, 2)
    self.assertIn(
        log_process.CHANGE_MAX_LOG_SIZE, old_lines[0],
        "Expected {} log message in old log file found {!r}".format(
            log_process.CHANGE_MAX_LOG_SIZE, old_lines))
    self.assertIn(
        _FULL_LOG_MESSAGE, old_lines[1],
        "Expected {} log message in old log file found {!r}".format(
            _FULL_LOG_MESSAGE, old_lines))
    self.assertFalse(
        os.path.exists(next_log_path),
        "Expected no log rotation to {}".format(next_log_path))

  def _verify_log_file_and_lines(self, log_path, count):
    """Asserts log_path holds exactly count well-formed log lines.

    Args:
      log_path: str path of the log file to inspect.
      count: int number of log lines expected in the file.

    Returns:
      list of str lines read from the log file.
    """
    filesize = os.path.getsize(log_path)
    if count > 0:
      self.assertGreater(filesize, 0,
                         "Expected file size > 0 found {}".format(filesize))
    else:
      self.assertEqual(0, filesize,
                       "Expected file size 0 found {}".format(filesize))
    with open(log_path, encoding="utf-8") as file:
      lines = file.readlines()
    self.assertEqual(
        count, len(lines),
        "Expected {} log line(s) found {}".format(count, len(lines)))
    # Every line written by the log writer must carry the standard header.
    for line in lines:
      match = re.search(log_process.LOG_LINE_HEADER_FORMAT, line)
      self.assertIsNotNone(
          match, "Expected log line header {!r} in {!r}".format(
              log_process.LOG_LINE_HEADER_FORMAT, line))
    return lines
if __name__ == "__main__":
  # Run the log process test suite when this file is executed directly.
  unit_test_case.main()
| |
import base64
import json
import os
import unittest
from cryptography.hazmat.backends.openssl.backend import backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from sdc.crypto.key_store import KeyStore
import jwt
import yaml
import settings
from server import app
from server import KEY_PURPOSE_SUBMISSION
def get_key(key_name):
    """Return the contents of the key file at *key_name* as a string.

    Args:
        key_name: path to the PEM key file to read.

    Returns:
        The full text of the key file.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the previous version opened the file and never closed it).
    with open(key_name, 'r') as key_file:
        return key_file.read()
# Test key material loaded from the repo's jwt-test-keys directory.
# SDX submission-encryption private key; the Encrypter derives the matching
# public JWE key from it.
PRIVATE_KEY = get_key("./jwt-test-keys/sdc-sdx-submission-encryption-private-v1.pem")
# EQ submission-signing private key used to sign the JWT payload.
TEST_EQ_PRIVATE_KEY = get_key("./jwt-test-keys/eq/sdc-eq-submission-signing-private-v1.pem")
class Encrypter:
    """Builds JWE submissions the way the EQ survey runner would.

    The JSON payload is signed as an RS256 JWT with the EQ private signing
    key, then encrypted for SDX: the content is AES-256-GCM encrypted under
    a random CEK, and the CEK is wrapped with RSA-OAEP using the SDX
    submission public key.
    """

    def __init__(self, private_kid, public_kid):
        """Loads key material and generates a random CEK and IV.

        Args:
            private_kid: key id placed in the JWE protected header.
            public_kid: key id placed in the JWT header.
        """
        self.private_kid = private_kid
        self.public_kid = public_kid
        private_key_bytes = self._to_bytes(TEST_EQ_PRIVATE_KEY)
        self.private_key = serialization.load_pem_private_key(private_key_bytes,
                                                              password=None,
                                                              backend=backend)
        # Derive the SDX public (encryption) key from the private test key so
        # the tests need only the private PEM on disk.
        private_decryption_key = serialization.load_pem_private_key(
            PRIVATE_KEY.encode(),
            password=None,
            backend=backend
        )
        public_key_bytes = private_decryption_key.public_key().public_bytes(
            encoding=Encoding.PEM,
            format=PublicFormat.SubjectPublicKeyInfo
        )
        self.public_key = serialization.load_pem_public_key(public_key_bytes, backend=backend)
        # first generate a random key
        self.cek = os.urandom(32)  # 256 bit random CEK
        # now generate a random IV
        self.iv = os.urandom(12)  # 96 bit random IV

    @classmethod
    def _to_bytes(cls, bytes_or_str):
        """Returns bytes_or_str encoded to bytes if it is a str."""
        # FIX: the first parameter of a classmethod is conventionally `cls`,
        # not `self` (it receives the class, not an instance).
        if isinstance(bytes_or_str, str):
            value = bytes_or_str.encode()
        else:
            value = bytes_or_str
        return value

    def _jwe_protected_header(self):
        """Returns the base64url-encoded JWE protected header bytes."""
        header = '{"alg":"RSA-OAEP","enc":"A256GCM", "kid":"' + self.private_kid + '"}'
        return self._base_64_encode(header.encode())

    def _encrypted_key(self, cek):
        """Wraps the CEK with RSA-OAEP (SHA-1) and base64url-encodes it."""
        ciphertext = self.public_key.encrypt(cek, padding.OAEP(
            mgf=padding.MGF1(algorithm=hashes.SHA1()), algorithm=hashes.SHA1(), label=None))
        return self._base_64_encode(ciphertext)

    def _encode_iv(self, iv):
        """Base64url-encodes the initialisation vector."""
        return self._base_64_encode(iv)

    @classmethod
    def _base_64_encode(cls, text):
        """Base64url-encodes *text* without padding, returned as bytes."""
        # strip the trailing = as they are padding to make the result a multiple of 4
        # the RFC does the same, as do other base64 libraries so this is a safe operation
        return base64.urlsafe_b64encode(text).decode().strip("=").encode()

    def _encode_and_signed(self, payload):
        """Signs *payload* as an RS256 JWT with the EQ private key."""
        return jwt.encode(payload, self.private_key, algorithm="RS256", headers={'kid': self.public_kid, 'typ': 'jwt'})

    def encrypt(self, json):
        """Returns the compact-serialisation JWE (bytes) for *json*.

        Note: the parameter name shadows the stdlib `json` module inside
        this method; kept for interface compatibility with callers.
        """
        payload = self._encode_and_signed(json)
        jwe_protected_header = self._jwe_protected_header()
        encrypted_key = self._encrypted_key(self.cek)
        cipher = Cipher(algorithms.AES(self.cek), modes.GCM(self.iv), backend=backend)
        encryptor = cipher.encryptor()
        # The protected header is authenticated (AAD) but not encrypted.
        encryptor.authenticate_additional_data(jwe_protected_header)
        ciphertext = encryptor.update(payload) + encryptor.finalize()
        tag = encryptor.tag
        encoded_ciphertext = self._base_64_encode(ciphertext)
        encoded_tag = self._base_64_encode(tag)
        # assemble result
        jwe = jwe_protected_header + b"." + encrypted_key + b"." + \
            self._encode_iv(self.iv) + b"." + encoded_ciphertext + b"." + encoded_tag
        return jwe
class TestDecryptService(unittest.TestCase):
    """Round-trip tests for the Flask /decrypt endpoint.

    Each test encrypts a payload the way EQ would (see Encrypter) and checks
    the service either decrypts it back to the original JSON or rejects bad
    input with HTTP 400.
    """
    decrypt_endpoint = "/decrypt"
    def setUp(self):
        """Builds a test client and an Encrypter keyed from SDX_KEYS_FILE."""
        # creates a test client
        self.app = app.test_client()
        # propagate the exceptions to the test client
        self.app.testing = True
        # Use the same key store as the server so test payloads decrypt.
        with open(settings.SDX_KEYS_FILE) as file:
            secrets_from_file = yaml.safe_load(file)
        secret_store = KeyStore(secrets_from_file)
        jwt_key = secret_store.get_key_for_purpose_and_type(KEY_PURPOSE_SUBMISSION, "private")
        jwe_key = secret_store.get_key_for_purpose_and_type(KEY_PURPOSE_SUBMISSION, "public")
        self.encrypter = Encrypter(jwt_key.kid, jwe_key.kid)
    def encrypt_and_send_json(self, json_string):
        """Encrypts json_string as a JWE and POSTs it to the decrypt endpoint."""
        data = json.loads(json_string)
        encoded_data = self.encrypter.encrypt(data)
        # Ask posie to decode message
        r = self.app.post(self.decrypt_endpoint, data=encoded_data)
        return r
    def test_decrypt_fail_sends_400(self):
        """Non-JWE input must be rejected with HTTP 400."""
        # Ask posie to decode message
        r = self.app.post(self.decrypt_endpoint, data='rubbish')
        self.assertEqual(r.status_code, 400)
    def test_no_content_sends_400(self):
        """An empty body must be rejected with HTTP 400."""
        # Ask posie to decode message
        r = self.app.post(self.decrypt_endpoint, data='')
        self.assertEqual(r.status_code, 400)
    def test_decrypts_message(self):
        """A small JSON payload survives the encrypt/decrypt round trip."""
        # Encrypt a message with the key
        message = '''{"some": "well", "formed": "json"}'''
        # Ask posie to decode message
        r = self.encrypt_and_send_json(message)
        # Compare to bytestring version of decrypted data
        self.assertEqual(json.loads(r.data.decode('UTF8')), json.loads(message))
    def test_decrypts_large_message_no_tx_id(self):
        """A realistic survey payload without tx_id round-trips intact."""
        # Encrypt a message with the key
        message = '''{
            "type": "uk.gov.ons.edc.eq:surveyresponse",
            "version": "0.0.1",
            "origin": "uk.gov.ons.edc.eq",
            "survey_id": "21",
            "collection": {
                "exercise_sid": "hfjdskf",
                "instrument_id": "0203",
                "period": "2016-02-01"
            },
            "submitted_at": "2016-03-12T10:39:40Z",
            "metadata": {
                "user_id": "789473423",
                "ru_ref": "12345678901A"
            },
            "data": {
                "11": "01042016",
                "12": "31102016",
                "20": "1800000",
                "51": "84",
                "52": "10",
                "53": "73",
                "54": "24",
                "50": "205",
                "22": "705000",
                "23": "900",
                "24": "74",
                "25": "50",
                "26": "100",
                "21": "60000",
                "27": "7400",
                "146": "some comment"
            }
        }'''
        # Encrypt and ask posie to decode message
        r = self.encrypt_and_send_json(message)
        self.assertEqual(json.loads(r.data.decode('UTF8')), json.loads(message))
    def test_decrypts_large_message_with_tx_id(self):
        """A realistic survey payload including tx_id round-trips intact."""
        # Encrypt a message with the key
        message = '''{
            "type": "uk.gov.ons.edc.eq:surveyresponse",
            "version": "0.0.1",
            "origin": "uk.gov.ons.edc.eq",
            "survey_id": "21",
            "tx_id": "27923934-62de-475c-bc01-433c09fd38b8",
            "collection": {
                "exercise_sid": "hfjdskf",
                "instrument_id": "0203",
                "period": "2016-02-01"
            },
            "submitted_at": "2016-03-12T10:39:40Z",
            "metadata": {
                "user_id": "789473423",
                "ru_ref": "12345678901A"
            },
            "data": {
                "11": "01042016",
                "12": "31102016",
                "20": "1800000",
                "51": "84",
                "52": "10",
                "53": "73",
                "54": "24",
                "50": "205",
                "22": "705000",
                "23": "900",
                "24": "74",
                "25": "50",
                "26": "100",
                "21": "60000",
                "27": "7400",
                "146": "some comment"
            }
        }'''
        # Encrypt and ask posie to decode message
        r = self.encrypt_and_send_json(message)
        self.assertEqual(json.loads(r.data.decode('UTF8')), json.loads(message))
| |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from pyglet.gl.base import Config, CanvasConfig, Context
from pyglet.gl import ContextException
from pyglet.gl import gl
from pyglet.gl import agl
from pyglet.canvas.cocoa import CocoaCanvas
from pyglet.libs.darwin.objc_runtime import *
# Valid names for GL attributes and their corresponding NSOpenGL constant.
_gl_attributes = {
    'double_buffer': NSOpenGLPFADoubleBuffer,
    'stereo': NSOpenGLPFAStereo,
    'buffer_size': NSOpenGLPFAColorSize,
    'sample_buffers': NSOpenGLPFASampleBuffers,
    'samples': NSOpenGLPFASamples,
    'aux_buffers': NSOpenGLPFAAuxBuffers,
    'alpha_size': NSOpenGLPFAAlphaSize,
    'depth_size': NSOpenGLPFADepthSize,
    'stencil_size': NSOpenGLPFAStencilSize,
    # Not exposed by pyglet API (set internally)
    'all_renderers': NSOpenGLPFAAllRenderers,
    'fullscreen': NSOpenGLPFAFullScreen,
    'minimum_policy': NSOpenGLPFAMinimumPolicy,
    'maximum_policy': NSOpenGLPFAMaximumPolicy,
    'screen_mask' : NSOpenGLPFAScreenMask,
    # Not supported in current pyglet API
    'color_float': NSOpenGLPFAColorFloat,
    'offscreen': NSOpenGLPFAOffScreen,
    'sample_alpha': NSOpenGLPFASampleAlpha,
    'multisample': NSOpenGLPFAMultisample,
    'supersample': NSOpenGLPFASupersample,
}
# NSOpenGL constants which do not require a value.
# (CocoaConfig.match appends these as bare flags, without a trailing value.)
_boolean_gl_attributes = frozenset([
    NSOpenGLPFAAllRenderers,
    NSOpenGLPFADoubleBuffer,
    NSOpenGLPFAStereo,
    NSOpenGLPFAMinimumPolicy,
    NSOpenGLPFAMaximumPolicy,
    NSOpenGLPFAOffScreen,
    NSOpenGLPFAFullScreen,
    NSOpenGLPFAColorFloat,
    NSOpenGLPFAMultisample,
    NSOpenGLPFASupersample,
    NSOpenGLPFASampleAlpha,
])
# Attributes for which no NSOpenGLPixelFormatAttribute name exists.
# We could probably compute actual values for these using
# NSOpenGLPFAColorSize / 4 and NSOpenGLFAAccumSize / 4, but I'm not that
# confident I know what I'm doing.
# CocoaCanvasConfig copies these placeholder values onto itself so that
# pyglet.info can report every standard attribute name.
_fake_gl_attributes = {
    'red_size': 0,
    'green_size': 0,
    'blue_size': 0,
    'accum_red_size': 0,
    'accum_green_size': 0,
    'accum_blue_size': 0,
    'accum_alpha_size': 0
}
class CocoaConfig(Config):
    """GL config that matches itself against an NSOpenGLPixelFormat."""

    def match(self, canvas):
        """Return a list of canvas configs compatible with this config."""
        # Translate the pyglet GL attributes into NSOpenGLPixelFormat
        # attribute/value pairs; booleans are flags without a value.
        attribute_list = []
        for name, value in self.get_gl_attributes():
            constant = _gl_attributes.get(name)
            if constant and value:
                attribute_list.append(constant)
                if constant not in _boolean_gl_attributes:
                    attribute_list.append(int(value))
        # Always request: all renderers (supports the non-compliant RAGE-II),
        # the maximum selection policy, and fullscreen capability so we can
        # switch to and from fullscreen without losing the context.  The
        # fullscreen attributes also require a screen mask for the display.
        # Not needed on Mac OS X 10.6, where fullscreen rendering is just a
        # screen-sized window.  See:
        # http://developer.apple.com/library/mac/#documentation/GraphicsImaging/Conceptual/OpenGL-MacProgGuide/opengl_fullscreen/opengl_cgl.html%23//apple_ref/doc/uid/TP40001987-CH210-SW6
        attribute_list.append(NSOpenGLPFAAllRenderers)
        attribute_list.append(NSOpenGLPFAMaximumPolicy)
        attribute_list.append(NSOpenGLPFAFullScreen)
        attribute_list.append(NSOpenGLPFAScreenMask)
        attribute_list.append(
            quartz.CGDisplayIDToOpenGLDisplayMask(quartz.CGMainDisplayID()))
        attribute_list.append(0)  # Zero-terminate the attribute list.
        array_type = c_uint32 * len(attribute_list)
        native_attrs = array_type(*attribute_list)
        # Build the NSOpenGLPixelFormat from the attribute array.
        pixel_format = send_message('NSOpenGLPixelFormat', 'alloc')
        pixel_format = send_message(pixel_format, 'initWithAttributes:',
                                    native_attrs, argtypes=[array_type])
        if pixel_format is None:
            return []
        return [CocoaCanvasConfig(canvas, self, pixel_format)]
class CocoaCanvasConfig(CanvasConfig):
    """Canvas config backed by a concrete NSOpenGLPixelFormat."""
    def __init__(self, canvas, config, pixel_format):
        super(CocoaCanvasConfig, self).__init__(canvas, config)
        self._pixel_format = pixel_format
        # Query values for the attributes of the pixel format, and then set the
        # corresponding attributes of the canvas config.
        for name, attr in _gl_attributes.items():
            vals = c_long()
            send_message(self._pixel_format, 'getValues:forAttribute:forVirtualScreen:',
                         byref(vals), attr, 0, argtypes=[POINTER(c_long), c_int, c_long])
            setattr(self, name, vals.value)
        # Set these attributes so that we can run pyglet.info.
        # (Placeholder zeros -- see _fake_gl_attributes.)
        for name, value in _fake_gl_attributes.items():
            setattr(self, name, value)
    def create_context(self, share):
        """Creates a CocoaContext, optionally sharing objects with *share*."""
        # Determine the shared NSOpenGLContext.
        if share:
            share_context = share._nscontext
        else:
            share_context = None
        # Create a new NSOpenGLContext.
        nscontext = send_message('NSOpenGLContext', 'alloc')
        nscontext = send_message(nscontext, 'initWithFormat:shareContext:',
                                 self._pixel_format,
                                 share_context)
        return CocoaContext(self, nscontext, share)
    def compatible(self, canvas):
        """Returns True if *canvas* can be used with this config."""
        return isinstance(canvas, CocoaCanvas)
class CocoaContext(Context):
    """GL context wrapping an NSOpenGLContext."""
    def __init__(self, config, nscontext, share):
        super(CocoaContext, self).__init__(config, share)
        self.config = config
        self._nscontext = nscontext
    def attach(self, canvas):
        """Bind the context to the canvas' NSView and make it current."""
        super(CocoaContext, self).attach(canvas)
        # The NSView must already belong to a nondeferred window when setView:
        # is sent, otherwise Cocoa logs an "invalid drawable" message.
        send_message(self._nscontext, 'setView:', canvas.nsview)
        self.set_current()
    def detach(self):
        """Unbind the context from its drawable."""
        super(CocoaContext, self).detach()
        send_message(self._nscontext, 'clearDrawable')
    def set_current(self):
        send_message(self._nscontext, 'makeCurrentContext')
        super(CocoaContext, self).set_current()
    def update_geometry(self):
        # Must be called whenever the context drawable (an NSView) changes
        # size or location.
        send_message(self._nscontext, 'update')
    def set_full_screen(self):
        send_message(self._nscontext, 'makeCurrentContext')
        send_message(self._nscontext, 'setFullScreen')
    def destroy(self):
        """Release the underlying NSOpenGLContext."""
        super(CocoaContext, self).destroy()
        send_message(self._nscontext, 'release')
        self._nscontext = None
    def set_vsync(self, vsync=True):
        """Enable or disable swap synchronization with the display refresh."""
        interval = c_long(vsync)
        send_message(self._nscontext, 'setValues:forParameter:',
                     byref(interval), NSOpenGLCPSwapInterval, argtypes=[POINTER(c_long), c_int])
    def get_vsync(self):
        """Return the current swap-interval setting as an int."""
        interval = c_long()
        send_message(self._nscontext, 'getValues:forParameter:',
                     byref(interval), NSOpenGLCPSwapInterval, argtypes=[POINTER(c_long), c_int])
        return interval.value
    def flip(self):
        send_message(self._nscontext, 'flushBuffer')
| |
from geopar.angle_class import Angle
__author__ = 'mostly satbek'
class TriangulatedFigure:
    """
    A plane figure composed of Triangle objects glued along shared edges.
    Class Invariants
    1: self.triangles is a list of triangle, each with a unique set of vertices
    2: For every triangle t1 in self.triangles, there is a t2 in self.triangles
    such that t1.points and t2.points share 2 elements
    """
    def __init__(self, triangles=None):
        # the Triangle objects that make up self
        # (when omitted, a fresh list is created so instances never share
        # a default container)
        if triangles:
            self._triangles = triangles
        else:
            self._triangles = []
    def get_id(self):
        # 'id' of a triangulated figure is an integer number (result of built-in hash() function)
        # that is unique to every triangulated figure with different configurations.
        # That is, two triangulated figures with equivalent configurations have the same states.
        # Sorting the per-triangle hashes makes the id independent of the
        # order in which triangles were added.
        return hash(str(sorted(list(map(hash, self._triangles)))))
    def add(self, a_triangle):
        # Append a triangle to the figure.
        # NOTE(review): the preconditions below are a documented contract
        # only -- nothing here enforces them.
        # Precondition 1: a_triangle is a Triangle instance
        # Precondition 2: len(self.triangles) < 2
        # --XOR--
        # a_triangle ... is not in self.triangles AND
        # ... shares two vertices with a Triangle in old(self.triangles)
        # Postcondition: a_triangle is in self.triangles
        self._triangles.append(a_triangle)
    def set_angle_by_angle_points(self, p1, p2, p3, angle_):
        """
        Sets an angle in a triangulated figure by the angle's angle points.
        Any angle in a Triangulated Figure can be described by a unique set of points
        called angle points. In geometry,
        A
        |
        |
        |_ a
        |_|________C
        B
        angle a can be referred to as ABC. A, B, and C are vertices of line segments AB and BC.
        In this project, we use the same geometric notion to describe an angle in a triangulated figure.
        Since points in triangulated figure are unique, any angle has its own set of unique points.
        We call them angle points. To make things consistent, we describe an angle by its angle points
        in clockwise order. So, for the above example, angle points for angle a would be CBA.
        PRE1: (p1 and p2 and p3) are in self.get_points()
        PRE2: Points are in clockwise order
        PRE3: angle_ is (Angle or int or float) instance
        PRE4: angle_ has the same dimensionality as any of known angles in self
        POST: the angle at vertex p2 of the triangle containing p1, p2 and p3
              is set to angle_
        """
        # p2 is the vertex of the angle. At most one triangle can match since
        # triangle point sets are unique (class invariant 1).
        for triangle in self._triangles:
            if triangle.has_all_points([p1, p2, p3]):
                triangle.set_angle_by_point(p2, angle_)
    def get_angle_by_angle_points(self, p1, p2, p3):
        """
        Returns an angle in a triangulated figure by the angle's angle points.
        Returns None when no triangle contains all three points.
        PRE1: (p1 and p2 and p3) are in self.get_points()
        PRE2: Points are in clockwise order
        """
        for triangle in self._triangles:
            if triangle.has_all_points([p1, p2, p3]):
                return triangle.angle_of_point(p2)
    def get_triangles(self):
        """
        Returns a list of triangles that make up self.
        """
        return self._triangles
    def get_points(self):
        """
        Returns a set of all points that make up self.
        (Implemented as a list with duplicates removed; ordering is arbitrary.)
        """
        all_points = list()
        for triangle in self._triangles:
            all_points.extend(triangle.get_points())
        return list(set(all_points))
    def __str__(self):
        """
        Returns a string representation of self, one triangle per line.
        """
        return_str = ""
        for current_triangle in self._triangles:
            return_str += str(current_triangle)
            return_str += "\n"
        return return_str
    def all_angles_are_known(self):
        """
        Returns True if all angles in self are known, False otherwise.
        """
        for triangle in self._triangles:
            if triangle.has_unknown_angle():
                return False
        return True
    def is_empty(self):
        """
        Returns True if self has no triangles, False otherwise.
        """
        return not bool(self._triangles)
    def triangles_with_point(self, a_point):
        """
        Returns the (contiguous) list of self.triangles containing a_point in clockwise order.
        PRE: At least one triangle in self.triangles contains a_point
        """
        # [Collected]: triangles_with_a_point =
        # the triangles in self.triangles containing a_point
        triangles_with_a_point = []
        for triangle in self._triangles:
            if triangle.has_point(a_point):
                triangles_with_a_point.append(triangle)
        # (In Order): triangles_in_order is a non-empty sub-list of
        # triangles_with_a_point, which is in clockwise order
        # AND triangles_remaining = triangles_with_a_point\triangles_in_order
        # The ordered list grows from both ends: a candidate is glued onto
        # whichever side its shared edge matches (head or tail).
        triangles_in_order = [triangles_with_a_point[0]]
        triangles_remaining = triangles_with_a_point[1:]
        # NOTE(review): if the triangles around a_point do not form a single
        # contiguous fan (PRE violated), no candidate ever matches and this
        # while loop does not terminate -- verify callers uphold PRE.
        while len(triangles_in_order) < len(triangles_with_a_point):
            for triangle_ in triangles_remaining:
                point_following = triangles_in_order[0].point_following(a_point)
                if triangle_.point_preceding(a_point) == point_following:
                    triangles_in_order.insert(0, triangle_)
                    triangles_remaining.remove(triangle_)
                    break
                point_preceding = triangles_in_order[-1].point_preceding(a_point)
                if triangle_.point_following(a_point) == point_preceding:
                    triangles_in_order.append(triangle_)
                    triangles_remaining.remove(triangle_)
                    break
        # (Complement): len(triangles_in_order) = len(triangles_with_a_point)
        return triangles_in_order
    def get_interior_points(self):
        """
        Returns the list of interior points in self.
        An interior point is one completely surrounded by triangles (the
        triangles around it form a closed fan).
        OBJECTIVES:
        (Found 1a): found the points that have more than 2 triangles attached to them
        AND
        (Found 1b): saved them in point_nums, alongside with number of triangles that they are in
        (Found 2): found interior points
        (Complement): returned interior_points
        """
        # (Found 1a)
        all_points = self.get_points()
        point_nums = []
        for point in all_points:
            n = len(self.triangles_with_point(point))
            if n > 2:
                # (Found 1b)
                point_nums.append((point, n))
        # (Found 2)
        # For a closed fan of n triangles around a point, the union of their
        # vertices has exactly n + 1 distinct points (the center plus the rim).
        interior_points = []
        for point_num in point_nums:
            points = []
            for triangle in self.get_triangles():
                if triangle.has_point(point_num[0]):
                    points.extend(triangle.get_points())
            if len(set(points)) == point_num[1] + 1:
                interior_points.append(point_num[0])
        # (Complement): all interior points found
        return interior_points
    def number_of_unknown_angles_at(self, a_point):
        """
        Returns the number of unknown angles at a_point.
        PRE: a_point is in self.get_points
        POST: count contains the number of unknown angles at a_point
        """
        count = 0
        triangles = self.triangles_with_point(a_point)
        for triangle in triangles:
            angle = triangle.angle_of_point(a_point)
            if not angle.is_known():
                count += 1
        return count
    def sum_of_known_angles_at(self, a_point):
        """
        Returns the sum of known angles at a_point.
        PRE: a_point is in self.get_points
        POST: sum_angles contains the sum of known angles at a_point
        """
        sum_angles = 0
        triangles = self.triangles_with_point(a_point)
        for triangle in triangles:
            angle = triangle.angle_of_point(a_point)
            if angle.is_known():
                sum_angles += angle
        return sum_angles
    def angle_points_of_unknown_angles_at(self, a_point):
        """
        Returns a list of angle points of unknown angles at a_point.
        PRE: a_point is in self.get_points
        POST: list_of_points contains angle points of unknown angles
        """
        list_of_points = []
        triangles = self.triangles_with_point(a_point)
        for triangle in triangles:
            angle = triangle.angle_of_point(a_point)
            if not angle.is_known():
                angle_points = triangle.get_angle_points_by_point(a_point)
                list_of_points.append(angle_points)
        return list_of_points
    def complete_unknown_angle_at(self, a_point):
        """
        Computes an unknown angle at a point by using 360 degrees rule.
        PRE1: a_point is an interior point of a triangulated figure a_tf
        PRE2: there is exactly one unknown angle at a_point
        POST: unknown angle (see PRE2) is computed
        """
        # (Counted) unknowns_count contains the number of unknown angles at a_point
        # unknowns_count is used to keep PRE1 true
        unknowns_count = self.number_of_unknown_angles_at(a_point)
        # (Summed up) angles_sum is a sum of known angles at a_point
        angles_sum = self.sum_of_known_angles_at(a_point)
        # (Found and set) unknown_angle is the value of the unknown_angle
        # (angles around an interior point total 360 degrees)
        unknown_angle = 360 - angles_sum
        if unknowns_count == 1:
            # (Recorded) angle_points is a list of angle_points of unknown_angle at a_point
            angle_points = self.angle_points_of_unknown_angles_at(a_point)[-1]
            self.set_angle_by_angle_points(*angle_points, unknown_angle)
| |
# Meant to parse out address lines, minus city,state,zip into a usable dict for address matching
# Ignores periods and commas, because no one cares.
import re
import csv
import os
import dstk
import sys
# Keep lowercase, no periods
# Requires numbers first, then option dash plus numbers.
# e.g. "123" or "123-4": groups are (number)(optional dash)(optional range end).
street_num_regex = r'^(\d+)(-?)(\d*)$'
# Loose apartment-number shape: optional '#', then digits, then word chars.
apartment_regex_number = r'(#?)(\d*)(\w*)'
# Directory containing this module; used to locate the bundled csv data files.
cwd = os.path.dirname(os.path.realpath(__file__))
class AddressParser(object):
    """
    AddressParser is used to create Address objects. It contains a list of preseeded cities, states, prefixes,
    suffixes, and street names that will help the Address object correctly parse the given string. It is loaded
    with defaults that work in the average case, but can be adjusted for specific cases.
    """
    # Class-level defaults are kept for backward compatibility with code that
    # reads them off the class. Instances always receive their own containers
    # in __init__ (bug fix: the load_* helpers used to mutate these shared
    # class attributes, so every parser shared one ever-growing list).
    suffixes = {}
    # Lower case list of cities, used as a hint
    cities = []
    # Lower case list of streets, used as a hint
    streets = []
    prefixes = {
        "n": "N.", "e": "E.", "s": "S.", "w": "W.", "ne": "NE.", "nw": "NW.", 'se': "SE.", 'sw': "SW.", 'north': "N.",
        'east': "E.", 'south': "S.",
        'west': "W.", 'northeast': "NE.", 'northwest': "NW.", 'southeast': "SE.", 'southwest': "SW."}
    states = {
        'Mississippi': 'MS', 'Oklahoma': 'OK', 'Delaware': 'DE', 'Minnesota': 'MN', 'Illinois': 'IL', 'Arkansas': 'AR',
        'New Mexico': 'NM', 'Indiana': 'IN', 'Maryland': 'MD', 'Louisiana': 'LA', 'Idaho': 'ID', 'Wyoming': 'WY',
        'Tennessee': 'TN', 'Arizona': 'AZ', 'Iowa': 'IA', 'Michigan': 'MI', 'Kansas': 'KS', 'Utah': 'UT',
        'Virginia': 'VA', 'Oregon': 'OR', 'Connecticut': 'CT', 'Montana': 'MT', 'California': 'CA',
        'Massachusetts': 'MA', 'West Virginia': 'WV', 'South Carolina': 'SC', 'New Hampshire': 'NH',
        'Wisconsin': 'WI', 'Vermont': 'VT', 'Georgia': 'GA', 'North Dakota': 'ND', 'Pennsylvania': 'PA',
        'Florida': 'FL', 'Alaska': 'AK', 'Kentucky': 'KY', 'Hawaii': 'HI', 'Nebraska': 'NE', 'Missouri': 'MO',
        'Ohio': 'OH', 'Alabama': 'AL', 'New York': 'NY', 'South Dakota': 'SD', 'Colorado': 'CO', 'New Jersey': 'NJ',
        'Washington': 'WA', 'North Carolina': 'NC', 'District of Columbia': 'DC', 'Texas': 'TX', 'Nevada': 'NV',
        'Maine': 'ME', 'Rhode Island': 'RI'}
    def __init__(self, suffixes=None, cities=None, streets=None, backend="default", dstk_api_base=None, logger=None, required_confidence=0.65):
        """
        suffixes, cities and streets provide a chance to use different lists than the provided lists.
        suffixes is probably good for most users, unless you have some suffixes not recognized by USPS.
        cities is a very expansive list that may lead to false positives in some cases. If you only have a few cities
        you know will show up, provide your own list for better accuracy. If you are doing addresses across the US,
        the provided list is probably better.
        streets can be used to limit the list of possible streets the address are on. It comes blank by default and
        uses positional clues instead. If you are instead just doing a couple cities, a list of all possible streets
        will decrease incorrect street names.
        Valid backends include "default" and "dstk". If backend is dstk, it requires a dstk_api_base. Example of
        dstk_api_base would be 'http://example.com'.
        Raises ValueError for a dstk backend without dstk_api_base, or for an
        unknown backend name.
        """
        self.logger = logger
        self.backend = backend
        self.dstk_api_base = dstk_api_base
        self.required_confidence = required_confidence
        # Bug fix: create fresh per-instance containers before loading, so
        # loading the csv defaults never mutates the shared class attributes.
        if suffixes:
            self.suffixes = suffixes
        else:
            self.suffixes = {}
            self.load_suffixes(os.path.join(cwd, "suffixes.csv"))
        if cities:
            self.cities = cities
        else:
            self.cities = []
            self.load_cities(os.path.join(cwd, "cities.csv"))
        if streets:
            self.streets = streets
        else:
            self.streets = []
            self.load_streets(os.path.join(cwd, "streets.csv"))
        if backend == "dstk":
            if dstk_api_base is None:
                raise ValueError("dstk_api_base is required for dstk backend.")
            self.dstk = dstk.DSTK({'apiBase': dstk_api_base})
        elif backend == "default":
            pass
        else:
            raise ValueError("backend must be either 'default' or 'dstk'.")
    def parse_address(self, address, line_number=-1):
        """
        Return an Address object from the given address. Passes itself to the Address constructor to use all the custom
        loaded suffixes, cities, etc.
        """
        return Address(address, self, line_number, self.logger)
    def dstk_multi_address(self, address_list):
        """
        Resolve a batch of address strings through DSTK street2coordinates
        and return the successfully parsed Address objects. Entries DSTK
        cannot resolve, rejects as invalid, or matches below the required
        confidence are silently skipped.
        """
        if self.backend != "dstk":
            raise ValueError("Only allowed for DSTK backends.")
        if self.logger: self.logger.debug("Sending {0} possible addresses to DSTK".format(len(address_list)))
        multi_address = self.dstk.street2coordinates(address_list)
        if self.logger: self.logger.debug("Received {0} addresses from DSTK".format(len(multi_address)))
        addresses = []
        for address, dstk_return in multi_address.items():
            try:
                if dstk_return is None:
                    # DSTK had no answer for this input; skip it.
                    continue
                addresses.append(Address(address, self, -1, self.logger, dstk_pre_parse=dstk_return))
                if self.logger: self.logger.debug("DSTK Address Appended: {0}".format(dstk_return))
            except InvalidAddressException:
                continue
            except DSTKConfidenceTooLowException:
                continue
        return addresses
    def load_suffixes(self, filename):
        """
        Build the suffix dictionary. The keys will be possible long versions, and the values will be the
        accepted abbreviations. Everything should be stored using the value version, and you can search all
        by using building a set of self.suffixes.keys() and self.suffixes.values().
        """
        with open(filename, 'r') as f:
            for line in f:
                parts = line.strip().split(',')
                # Skip malformed rows that are not exactly "long,abbrev".
                if len(parts) != 2:
                    continue
                self.suffixes[parts[0]] = parts[1]
    def load_cities(self, filename):
        """
        Load up all cities in lowercase for easier matching. The file should have one city per line, with no extra
        characters. This isn't strictly required, but will vastly increase the accuracy.
        """
        with open(filename, 'r') as f:
            for line in f:
                self.cities.append(line.strip().lower())
    def load_streets(self, filename):
        """
        Load up all streets in lowercase for easier matching. The file should have one street per line, with no extra
        characters. This isn't strictly required, but will vastly increase the accuracy.
        """
        with open(filename, 'r') as f:
            for line in f:
                self.streets.append(line.strip().lower())
# Procedure: Go through backwards. First check for apartment number, then
# street suffix, street name, street prefix, then building. For each sub,
# check if that spot is already filled in the dict.
class Address:
    # Parsed address components. They default to unset at class level and are
    # filled in per-instance by the check_* methods as tokens are matched.
    unmatched = False
    house_number = None
    street_prefix = None
    street = None
    street_suffix = None
    apartment = None
    # building = None
    city = None
    state = None
    zip = None
    original = None
    # Only set for dstk
    lat = None
    lng = None
    last_matched = None
    # NOTE(review): duplicate of the `unmatched = False` above -- redundant
    # but harmless.
    unmatched = False
    # Only used for debug
    line_number = -1
    # Confidence value from DSTK. 0 - 1, -1 for not set.
    confidence = -1
    def __init__(self, address, parser, line_number=-1, logger=None, dstk_pre_parse=None):
        """
        Parse `address` immediately using the backend configured on `parser`.
        @dstk_pre_parse: a single value from a dstk multiple street2coordinates return. @address would be the key then.
        Raises InvalidAddressException when no house number or street can be
        determined, and ValueError for an unknown parser backend.
        """
        self.parser = parser
        self.line_number = line_number
        # Keep a cleaned copy of the raw input for diagnostics.
        # (_clean passes None through untouched.)
        self.original = self._clean(address)
        self.logger = logger
        if address is None:
            return
        address = self.preprocess_address(address)
        if parser.backend == "dstk":
            # if self.logger: self.logger.debug("Preparsed: {0}".format(dstk_pre_parse))
            self.dstk_parse(address, parser, pre_parsed_address=dstk_pre_parse)
        elif parser.backend == "default":
            self.parse_address(address)
        else:
            raise ValueError("Parser gave invalid backend, must be either 'default' or 'dstk'.")
        # NOTE(review): house_number is stored as a string, so `<= 0` relies
        # on Python 2 mixed-type comparison; under Python 3 this would raise
        # TypeError -- confirm before porting.
        if self.house_number is None or self.house_number <= 0:
            raise InvalidAddressException("Addresses must have house numbers.")
        elif self.street is None or self.street == "":
            raise InvalidAddressException("Addresses must have streets.")
        # if self.house_number is None or self.street is None or self.street_suffix is None:
        # raise ValueError("Street addresses require house_number, street, and street_suffix")
def parse_address(self, address):
# print "YOU ARE PARSING AN ADDRESS"
# Save the original string
# Get rid of periods and commas, split by spaces, reverse.
# Periods should not exist, remove them. Commas separate tokens. It's possible we can use commas for better guessing.
address = address.strip().replace('.', '')
# We'll use this for guessing.
self.comma_separated_address = address.split(',')
address = address.replace(',', '')
# First, do some preprocessing
# address = self.preprocess_address(address)
# Try all our address regexes. USPS says parse from the back.
address = reversed(address.split())
# Save unmatched to process after the rest is processed.
unmatched = []
# Use for contextual data
for token in address:
# print token, self
# Check zip code first
if self.check_zip(token):
continue
if self.check_state(token):
continue
if self.check_city(token):
continue
if self.check_street_suffix(token):
continue
if self.check_house_number(token):
continue
if self.check_street_prefix(token):
continue
if self.check_street(token):
continue
# if self.check_building(token):
# continue
if self.guess_unmatched(token):
continue
unmatched.append(token)
# Post processing
for token in unmatched:
# print "Unmatched token: ", token
if self.check_apartment_number(token):
continue
# print "Unmatched token: ", token
# print "Original address: ", self.original
self.unmatched = True
def preprocess_address(self, address):
"""
Takes a basic address and attempts to clean it up, extract reasonably assured bits that may throw off the
rest of the parsing, and return the cleaned address.
"""
# Run some basic cleaning
address = address.replace("# ", "#")
address = address.replace(" & ", "&")
# Clear the address of things like 'X units', which shouldn't be in an address anyway. We won't save this for now.
if re.search(r"-?-?\w+ units", address, re.IGNORECASE):
address = re.sub(r"-?-?\w+ units", "", address, flags=re.IGNORECASE)
# Sometimes buildings are put in parantheses.
# building_match = re.search(r"\(.*\)", address, re.IGNORECASE)
# if building_match:
# self.building = self._clean(building_match.group().replace('(', '').replace(')', ''))
# address = re.sub(r"\(.*\)", "", address, flags=re.IGNORECASE)
# Now let's get the apartment stuff out of the way. Using only sure match regexes, delete apartment parts from
# the address. This prevents things like "Unit" being the street name.
apartment_regexes = [r'#\w+ & \w+', '#\w+ rm \w+', "#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+',
r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*',
r'style\s\w{1,2}', r'townhouse style\s\w{1,2}']
for regex in apartment_regexes:
apartment_match = re.search(regex, address, re.IGNORECASE)
if apartment_match:
# print "Matched regex: ", regex, apartment_match.group()
self.apartment = self._clean(apartment_match.group())
address = re.sub(regex, "", address, flags=re.IGNORECASE)
# Now check for things like ", ," which throw off dstk
address = re.sub(r"\,\s*\,", ",", address)
return address
def check_zip(self, token):
"""
Returns true if token is matches a zip code (5 numbers). Zip code must be the last token in an address (minus anything
removed during preprocessing such as --2 units.
"""
if self.zip is None:
# print "last matched", self.last_matched
if self.last_matched is not None:
return False
# print "zip check", len(token) == 5, re.match(r"\d{5}", token)
if len(token) == 5 and re.match(r"\d{5}", token):
self.zip = self._clean(token)
return True
return False
def check_state(self, token):
"""
Check if state is in either the keys or values of our states list. Must come before the suffix.
"""
# print "zip", self.zip
if len(token) == 2 and self.state is None:
if token.capitalize() in self.parser.states.keys():
self.state = self._clean(self.parser.states[token.capitalize()])
return True
elif token.upper() in self.parser.states.values():
self.state = self._clean(token.upper())
return True
if self.state is None and self.street_suffix is None and len(self.comma_separated_address) > 1:
if token.capitalize() in self.parser.states.keys():
self.state = self._clean(self.parser.states[token.capitalize()])
return True
elif token.upper() in self.parser.states.values():
self.state = self._clean(token.upper())
return True
return False
    def check_city(self, token):
        """
        Check if there is a known city from our city list. Must come before the suffix.
        NOTE(review): the multi-word-city branch can fall off the end and
        implicitly return None (falsy); the debug prints below are Python 2
        statement syntax, so this module is Python 2 only.
        """
        shortened_cities = {'saint': 'st.'}
        # Simple case: state already found, suffix not yet seen.
        if self.city is None and self.state is not None and self.street_suffix is None:
            if token.lower() in self.parser.cities:
                self.city = self._clean(token.capitalize())
                return True
            return False
        # Check that we're in the correct location, and that we have at least one comma in the address
        if self.city is None and self.apartment is None and self.street_suffix is None and len(
                self.comma_separated_address) > 1:
            if token.lower() in self.parser.cities:
                self.city = self._clean(token.capitalize())
                return True
            return False
        # Multi word cities
        if self.city is not None and self.street_suffix is None and self.street is None:
            print "Checking for multi part city", token.lower(), token.lower() in shortened_cities.keys()
            # Try prepending the token to the already-found city part.
            if token.lower() + ' ' + self.city in self.parser.cities:
                self.city = self._clean((token.lower() + ' ' + self.city).capitalize())
                return True
            # Expand known short forms (e.g. "saint" -> "st.") and retry.
            if token.lower() in shortened_cities.keys():
                token = shortened_cities[token.lower()]
                print "Checking for shorted multi part city", token.lower() + ' ' + self.city
                if token.lower() + ' ' + self.city.lower() in self.parser.cities:
                    self.city = self._clean(token.capitalize() + ' ' + self.city.capitalize())
                    return True
def check_apartment_number(self, token):
"""
Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out,
because it has a lot of false positives.
"""
apartment_regexes = [r'#\w+ & \w+', '#\w+ rm \w+', "#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+',
r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*',
r'style\s\w{1,2}', r'\d{1,4}/\d{1,4}', r'\d{1,4}', r'\w{1,2}']
for regex in apartment_regexes:
if re.match(regex, token.lower()):
self.apartment = self._clean(token)
return True
# if self.apartment is None and re.match(apartment_regex_number, token.lower()):
## print "Apt regex"
# self.apartment = token
# return True
## If we come on apt or apartment and already have an apartment number, add apt or apartment to the front
if self.apartment and token.lower() in ['apt', 'apartment']:
# print "Apt in a_n"
self.apartment = self._clean(token + ' ' + self.apartment)
return True
if not self.street_suffix and not self.street and not self.apartment:
# print "Searching for unmatched term: ", token, token.lower(),
if re.match(r'\d?\w?', token.lower()):
self.apartment = self._clean(token)
return True
return False
def check_street_suffix(self, token):
"""
Attempts to match a street suffix. If found, it will return the abbreviation, with the first letter capitalized
and a period after it. E.g. "St." or "Ave."
"""
# Suffix must come before street
# print "Suffix check", token, "suffix", self.street_suffix, "street", self.street
if self.street_suffix is None and self.street is None:
# print "upper", token.upper()
if token.upper() in self.parser.suffixes.keys():
suffix = self.parser.suffixes[token.upper()]
self.street_suffix = self._clean(suffix.capitalize() + '.')
return True
elif token.upper() in self.parser.suffixes.values():
self.street_suffix = self._clean(token.capitalize() + '.')
return True
return False
def check_street(self, token):
"""
Let's assume a street comes before a prefix and after a suffix. This isn't always the case, but we'll deal
with that in our guessing game. Also, two word street names...well...
This check must come after the checks for house_number and street_prefix to help us deal with multi word streets.
"""
# First check for single word streets between a prefix and a suffix
if self.street is None and self.street_suffix is not None and self.street_prefix is None and self.house_number is None:
self.street = self._clean(token.capitalize())
return True
# Now check for multiple word streets. This check must come after the check for street_prefix and house_number for this reason.
elif self.street is not None and self.street_suffix is not None and self.street_prefix is None and self.house_number is None:
self.street = self._clean(token.capitalize() + ' ' + self.street)
return True
if not self.street_suffix and not self.street and token.lower() in self.parser.streets:
self.street = self._clean(token)
return True
return False
def check_street_prefix(self, token):
"""
Finds street prefixes, such as N. or Northwest, before a street name. Standardizes to 1 or two letters, followed
by a period.
"""
if self.street and not self.street_prefix and token.lower().replace('.', '') in self.parser.prefixes.keys():
self.street_prefix = self._clean(self.parser.prefixes[token.lower().replace('.', '')])
return True
return False
def check_house_number(self, token):
"""
Attempts to find a house number, generally the first thing in an address. If anything is in front of it,
we assume it is a building name.
"""
if self.street and self.house_number is None and re.match(street_num_regex, token.lower()):
if '/' in token:
token = token.split('/')[0]
if '-' in token:
token = token.split('-')[0]
self.house_number = self._clean(str(token))
return True
return False
def check_building(self, token):
"""
Building name check. If we have leftover and everything else is set, probably building names.
Allows for multi word building names.
"""
if self.street and self.house_number:
if not self.building:
self.building = self._clean(token)
else:
self.building = self._clean(token + ' ' + self.building)
return True
return False
def guess_unmatched(self, token):
"""
When we find something that doesn't match, we can make an educated guess and log it as such.
"""
# Check if this is probably an apartment:
if token.lower() in ['apt', 'apartment']:
return False
# Stray dashes are likely useless
if token.strip() == '-':
return True
# Almost definitely not a street if it is one or two characters long.
if len(token) <= 2:
return False
# Let's check for a suffix-less street.
if self.street_suffix is None and self.street is None and self.street_prefix is None and self.house_number is None:
# Streets will just be letters
if re.match(r"[A-Za-z]", token):
if self.line_number >= 0:
pass
# print "{0}: Guessing suffix-less street: ".format(self.line_number), token
else:
# print "Guessing suffix-less street: ", token
pass
self.street = self._clean(token.capitalize())
return True
return False
def full_address(self):
"""
Print the address in a human readable format
"""
addr = ""
# if self.building:
# addr = addr + "(" + self.building + ") "
if self.house_number:
addr = addr + self.house_number
if self.street_prefix:
addr = addr + " " + self.street_prefix
if self.street:
addr = addr + " " + self.street
if self.street_suffix:
addr = addr + " " + self.street_suffix
if self.apartment:
addr = addr + " " + self.apartment
if self.city:
addr = addr + ", " + self.city
if self.state:
addr = addr + ", " + self.state
if self.zip:
addr = addr + " " + self.zip
return addr
def _clean(self, item):
if item is None:
return None
else:
return item.encode("utf-8", "replace")
    def __repr__(self):
        # Delegate to __unicode__ via the Python 2 unicode() builtin
        # (this module is Python 2 only; unicode does not exist on Python 3).
        return unicode(self)
    def __str__(self):
        # Same representation for str() as for repr().
        return unicode(self)
    def __unicode__(self):
        # Render every parsed component in a single debug-friendly line.
        # __repr__ and __str__ both delegate here (Python 2 idiom).
        address_dict = {
            "house_number": self.house_number,
            "street_prefix": self.street_prefix,
            "street": self.street,
            "street_suffix": self.street_suffix,
            "apartment": self.apartment,
            # "building": self.building,
            "city": self.city,
            "state": self.state,
            "zip": self.zip
        }
        # print "Address Dict", address_dict
        return u"Address - House number: {house_number} Prefix: {street_prefix} Street: {street} Suffix: {street_suffix}" \
               u" Apartment: {apartment} City,State,Zip: {city}, {state} {zip}".format(**address_dict)
def dstk_parse(self, address, parser, pre_parsed_address=None):
    """
    Given an address string, use DSTK to parse the address and then coerce it to a normal Address object.
    pre_parsed_address for multi parsed string. Gives the value part for single dstk return value. If
    pre_parsed_address is None, parse it via dstk on its own.

    :param address: raw address string to parse.
    :param parser: parser object exposing ``dstk``, ``required_confidence``,
        ``suffixes`` and ``prefixes``.
    :param pre_parsed_address: optional dict already returned by DSTK (used
        when a batch of addresses was parsed in one call); when None, DSTK is
        queried here.
    :raises InvalidAddressException: if the DSTK result is missing fields or
        disagrees with the original address string.
    :raises DSTKConfidenceTooLowException: if DSTK's confidence is below
        ``parser.required_confidence``.
    """
    if pre_parsed_address:
        dstk_address = pre_parsed_address
    else:
        if self.logger: self.logger.debug("Asking DSTK for address parse {0}".format(address.encode("ascii", "ignore")))
        dstk_address = parser.dstk.street2coordinates(address)
    # Validate the DSTK payload before trusting any of its fields.
    if 'confidence' not in dstk_address:
        raise InvalidAddressException("Could not deal with DSTK return: {0}".format(dstk_address))
    if dstk_address['street_address'] == "":
        raise InvalidAddressException("Empty street address in DSTK return: {0}".format(dstk_address))
    if dstk_address['street_number'] is None or dstk_address['street_name'] is None:
        # Fixed typo in the message: "was Non" -> "was None".
        raise InvalidAddressException("House number or street name was None in DSTK return: {0}".format(dstk_address))
    if dstk_address['confidence'] < parser.required_confidence:
        raise DSTKConfidenceTooLowException("Required confidence: {0}. Got confidence: {1}. Address: {2}. Return: {3}.".format(parser.required_confidence, dstk_address['confidence'], address.encode("ascii", "ignore"), dstk_address))
    self.confidence = dstk_address['confidence']
    if 'street_address' in dstk_address:
        # Called for its debug-logging side effect; the counts are unused here.
        intersections = self._get_dstk_intersections(address, dstk_address['street_address'])
    if self.logger: self.logger.debug("Confidence: {0}.".format(dstk_address['confidence']))
    if self.logger: self.logger.debug("Address: {0}.".format(address))
    if self.logger: self.logger.debug("Return: {0}.".format(dstk_address))
    addr = dstk_address
    if addr is None:
        raise InvalidAddressException("DSTK could not parse address: {0}".format(self.original))
    # The house number must exist and must literally appear in the input.
    if "street_number" in addr:
        if addr["street_number"] not in address:
            raise InvalidAddressException("DSTK returned a house number not in the original address: {0}".format(addr))
        self.house_number = addr["street_number"]
    else:
        raise InvalidAddressException("(dstk) Addresses must have house numbers: {0}".format(addr))
    if "locality" in addr:
        self.city = addr["locality"]
        # DSTK shouldn't be returning unknown cities
        if addr["locality"] not in address:
            raise InvalidAddressException("DSTK returned a city not in the address. City: {0}, Address: {1}.".format(self.city, address))
    if "region" in addr:
        self.state = addr["region"]
    if "latitude" in addr:
        self.lat = addr["latitude"]
    if "longitude" in addr:
        self.lng = addr["longitude"]
    # Try and find the apartment: first remove the street_address, which does
    # not include the apartment. (The remainder is currently unused; kept for
    # parity with the original flow.)
    if "street_address" in addr:
        apartment = address.replace(addr["street_address"], '')
    # Now that we have an address, try to parse out street suffix, prefix, and street
    if self.apartment:
        street_addr = addr["street_address"].replace(self.apartment, '')
    else:
        street_addr = addr["street_address"]
    # We should be left with only prefix, street, suffix. Go for suffix first.
    split_addr = street_addr.split()
    if len(split_addr) == 0:
        if self.logger: self.logger.debug("Could not split street_address: {0}".format(addr))
        raise InvalidAddressException("Could not split street_address: {0}".format(addr))
    # Get rid of house_number
    if split_addr[0] == self.house_number:
        split_addr = split_addr[1:]
    if self.logger: self.logger.debug("Checking {0} for suffixes".format(split_addr[-1].upper()))
    if split_addr[-1].upper() in parser.suffixes.keys() or split_addr[-1].upper() in parser.suffixes.values():
        self.street_suffix = split_addr[-1]
        split_addr = split_addr[:-1]
    if self.logger: self.logger.debug("Checking {0} for prefixes".format(split_addr[0].lower()))
    if split_addr[0].lower() in parser.prefixes.keys() or split_addr[0].upper() in parser.prefixes.values() or \
            split_addr[0].upper() + '.' in parser.prefixes.values():
        # Normalize the stored prefix to upper case with a trailing dot.
        if split_addr[0][-1] == '.':
            self.street_prefix = split_addr[0].upper()
        else:
            self.street_prefix = split_addr[0].upper() + '.'
        if self.logger: self.logger.debug("Saving prefix: {0}".format(self.street_prefix))
        split_addr = split_addr[1:]
    if self.logger: self.logger.debug("Saving street: {0}".format(split_addr))
    self.street = " ".join(split_addr)
    # DSTK shouldn't be guessing cities that come before streets.
    # Fix: escape the values before searching — street/city names may contain
    # regex metacharacters, which previously could mis-match or raise
    # re.error when fed to re.search as a raw pattern.
    match = re.search(re.escape(self.street), address)
    if match is None:
        raise InvalidAddressException("DSTK picked a street not in the original address. Street: {0}. Address: {1}.".format(self.street, address))
    street_position = match
    if self.city is not None:
        # Fix: DSTK may return no 'locality'; previously re.search(None, ...)
        # raised TypeError here. Skip the ordering check when there is no city.
        match = re.search(re.escape(self.city), address)
        if match is None:
            raise InvalidAddressException("DSTK picked a city not in the original address. City: {0}. Address: {1}.".format(self.city, address))
        city_position = match
        if city_position.start(0) < street_position.end(0):
            raise InvalidAddressException("DSTK picked a street that comes after the city. Street: {0}. City: {1}. Address: {2}.".format(self.street, self.city, address))
    if self.logger: self.logger.debug("Successful DSTK address: {0}, house: {1}, street: {2}\n".format(self.original, self.house_number, self.street))
def _get_dstk_intersections(self, address, dstk_address):
"""
Find the unique tokens in the original address and the returned address.
"""
# Normalize both addresses
normalized_address = self._normalize(address)
normalized_dstk_address = self._normalize(dstk_address)
address_uniques = set(normalized_address) - set(normalized_dstk_address)
dstk_address_uniques = set(normalized_dstk_address) - set(normalized_address)
if self.logger: self.logger.debug("Address Uniques {0}".format(address_uniques))
if self.logger: self.logger.debug("DSTK Address Uniques {0}".format(dstk_address_uniques))
return (len(address_uniques), len(dstk_address_uniques))
def _normalize(self, address):
"""
Normalize prefixes, suffixes and other to make matching original to returned easier.
"""
normalized_address = []
if self.logger: self.logger.debug("Normalizing Address: {0}".format(address))
for token in address.split():
if token.upper() in self.parser.suffixes.keys():
normalized_address.append(self.parser.suffixes[token.upper()].lower())
elif token.upper() in self.parser.suffixes.values():
normalized_address.append(token.lower())
elif token.upper().replace('.', '') in self.parser.suffixes.values():
normalized_address.append(token.lower().replace('.', ''))
elif token.lower() in self.parser.prefixes.keys():
normalized_address.append(self.parser.prefixes[token.lower()].lower())
elif token.upper() in self.parser.prefixes.values():
normalized_address.append(token.lower()[:-1])
elif token.upper() + '.' in self.parser.prefixes.values():
normalized_address.append(token.lower())
else:
normalized_address.append(token.lower())
return normalized_address
def create_cities_csv(filename="places2k.txt", output="cities.csv"):
    """
    Takes the places2k.txt from USPS and creates a simple file of all cities.

    Per census.gov, characters 9-72 of each record hold the place name; the
    trailing word is the designation (city, town, etc.) and is dropped.
    Puerto Rico records are skipped (only the 50 states are wanted).
    """
    with open(filename, 'r') as source, open(output, 'w') as destination:
        for record in source:
            if record.startswith("PR"):
                continue
            name_words = record[9:72].split()[:-1]
            destination.write(" ".join(name_words) + '\n')
class InvalidAddressException(Exception):
    """Raised when an address string cannot be parsed into valid components."""
class DSTKConfidenceTooLowException(Exception):
    """Raised when DSTK parses an address but its confidence is below the required threshold."""
if __name__ == "__main__":
    # Join all command-line arguments into one address string and print the
    # parsed result. Parenthesized print with a single argument behaves
    # identically on Python 2 (where the original bare `print` statement was
    # a syntax error under Python 3) and Python 3.
    ap = AddressParser()
    print(ap.parse_address(" ".join(sys.argv[1:])))
# ---- (stray "|" artifact from file concatenation replaced with a comment) ----
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations(object):
"""ExpressRouteCircuitsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Auto-generated wiring: hold references to the shared pipeline client,
    # the (de)serializers, and the service configuration used by every
    # operation in this group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def _delete_initial(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial DELETE request of the long-running delete operation.

    Returns nothing on success; ``begin_delete`` polls the operation to
    completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend or override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = delete accepted (async), 204 = resource already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial DELETE. `cls=lambda x,y,z: x`
        # makes the helper return the raw PipelineResponse for the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not be
    # forwarded to the polling method as well.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete returns no body; only invoke the custom `cls` hook if given.
        if cls:
            return cls(pipeline_response, None, {})

    # Used by ARMPolling to format the final-GET URL of the operation.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # polling=True -> standard ARM LRO polling; False -> no polling;
    # anything else is assumed to be a caller-supplied PollingMethod.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRouteCircuit"
    """Gets information about the specified express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuit, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuit
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    # Map auth/not-found/conflict statuses to typed exceptions; callers may
    # extend or override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    parameters,  # type: "_models.ExpressRouteCircuit"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRouteCircuit"
    """Send the initial PUT request of the create-or-update long-running
    operation and deserialize its immediate response body.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the circuit model as the PUT request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 = updated an existing circuit, 201 = created a new one; both
    # responses carry the resource representation.
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    parameters,  # type: "_models.ExpressRouteCircuit"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ExpressRouteCircuit"]
    """Creates or updates an express route circuit.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the circuit.
    :type circuit_name: str
    :param parameters: Parameters supplied to the create or update express route circuit operation.
    :type parameters: ~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuit
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRouteCircuit or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuit]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial PUT; the lambda hands the raw
        # PipelineResponse back to the poller.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; do not forward to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final polled response into the circuit model.
        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Used by ARMPolling to format the final-GET URL of the operation.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # polling=True -> ARM LRO polling; False -> no polling; otherwise a
    # caller-supplied PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def _update_tags_initial(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRouteCircuit"
    """Send the initial PATCH request of the update-tags long-running
    operation and deserialize the returned circuit.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Only the tags object goes in the PATCH body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def begin_update_tags(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ExpressRouteCircuit"]
    """Updates an express route circuit tags.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the circuit.
    :type circuit_name: str
    :param parameters: Parameters supplied to update express route circuit tags.
    :type parameters: ~azure.mgmt.network.v2018_12_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRouteCircuit or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuit]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuit"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial PATCH; the lambda hands the raw
        # PipelineResponse back to the poller.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; do not forward to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final polled response into the circuit model.
        deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Used by ARMPolling to format the final-GET URL of the operation.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # polling=True -> ARM LRO polling; False -> no polling; otherwise a
    # caller-supplied PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'}  # type: ignore
def _list_arp_table_initial(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    device_path,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]
    """Send the initial POST request of the list-ARP-table long-running
    operation. Returns the ARP table on an immediate 200, or None when the
    service answers 202 (result only available after polling).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"

    # Construct URL
    url = self._list_arp_table_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # A 202 has no body yet; leave the result as None in that case.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore
def begin_list_arp_table(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    device_path,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ExpressRouteCircuitsArpTableListResult"]
    """Gets the currently advertised ARP table associated with the express route circuit in a resource
    group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitsArpTableListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: send the initial POST; the lambda hands the raw
        # PipelineResponse back to the poller.
        raw_result = self._list_arp_table_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; do not forward to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final polled response into the ARP table model.
        deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Used by ARMPolling to format the final-GET URL of the operation.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # polling=True -> ARM LRO polling; False -> no polling; otherwise a
    # caller-supplied PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'}  # type: ignore
def _list_routes_table_initial(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    device_path,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]
    """Issue the initial POST that starts the list-routes-table LRO.

    Returns the deserialized result when the service answers 200, or
    ``None`` when it answers 202 (operation accepted, still running).
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    # Callers may extend/override the status-code -> exception mapping.
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"
    # Expand the URL template with the serialized path arguments.
    request_url = self._client.format_url(
        self._list_routes_table_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        circuitName=self._serialize.url("circuit_name", circuit_name, 'str'),
        peeringName=self._serialize.url("peering_name", peering_name, 'str'),
        devicePath=self._serialize.url("device_path", device_path, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
    request = self._client.post(request_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 = finished synchronously, 202 = accepted / in progress.
    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    result = None
    if response.status_code == 200:
        result = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, result, {})
    return result
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore
def begin_list_routes_table(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    device_path,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]
    """Gets the currently advertised routes table associated with the express route circuit in a
    resource group.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
    Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitsRoutesTableListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # Pop 'cls' here so the initial call below receives the raw-response
    # passthrough lambda instead; the user callback is applied later in
    # get_long_running_output, once polling completes.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial POST that starts the LRO.
        raw_result = self._list_routes_table_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs only apply to the initial request; they must not leak
    # into the polling method constructed below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialization callback invoked with the final polling response.
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling may be True (default ARMPolling), False (no polling), or a
    # user-supplied PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from previously saved state (no initial request made).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'}  # type: ignore
def _list_routes_table_summary_initial(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    device_path,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
    """Initial POST for the routes-table-summary long-running operation.

    Returns the deserialized summary when the service replies 200, or
    ``None`` when it replies 202 (operation accepted, still in progress).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"
    # Construct URL: expand the metadata template with serialized path args.
    url = self._list_routes_table_summary_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 = finished synchronously, 202 = accepted / still running.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
    if cls:
        # Hand the raw response and deserialized model to the custom callback.
        return cls(pipeline_response, deserialized, {})
    return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore
def begin_list_routes_table_summary(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    device_path,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
    """Gets the currently advertised routes table summary associated with the express route circuit in
    a resource group.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :param device_path: The path of the device.
    :type device_path: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
    Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ExpressRouteCircuitsRoutesTableSummaryListResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    # Pop 'cls' so the initial call receives the raw-response passthrough
    # lambda; the user callback is applied in get_long_running_output.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: fire the initial POST that starts the LRO.
        raw_result = self._list_routes_table_summary_initial(
            resource_group_name=resource_group_name,
            circuit_name=circuit_name,
            peering_name=peering_name,
            device_path=device_path,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs only apply to the initial request; strip them before
    # constructing the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialization callback invoked with the final polling response.
        deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'devicePath': self._serialize.url("device_path", device_path, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling may be True (default ARMPolling), False (no polling), or a
    # user-supplied PollingMethod instance.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from previously saved state (no initial request made).
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}  # type: ignore
def get_stats(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRouteCircuitStats"
    """Gets all the stats from an express route circuit in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuitStats, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitStats
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitStats"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    # Callers may extend/override the status-code -> exception mapping.
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"
    # Build the GET request: expand the URL template, then attach the
    # api-version query parameter and the Accept header.
    request_url = self._client.format_url(
        self.get_stats.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        circuitName=self._serialize.url("circuit_name", circuit_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
    request = self._client.get(request_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    stats = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, stats, {})
    return stats
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'}  # type: ignore
def get_peering_stats(
    self,
    resource_group_name,  # type: str
    circuit_name,  # type: str
    peering_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRouteCircuitStats"
    """Gets all stats from an express route circuit in a resource group.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param circuit_name: The name of the express route circuit.
    :type circuit_name: str
    :param peering_name: The name of the peering.
    :type peering_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRouteCircuitStats, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitStats
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitStats"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"
    # Construct URL: expand the metadata template with serialized path args.
    url = self.get_peering_stats.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
        'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
    if cls:
        # Hand the raw response and deserialized model to the custom callback.
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ExpressRouteCircuitListResult"]
    """Gets all the express route circuits in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"

    def build_request(next_link=None):
        # First page: expand the URL template and add the api-version
        # parameter. Later pages: follow the server-supplied next_link verbatim.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            return self._client.get(next_link, {}, headers)
        url = self._client.format_url(
            self.list.metadata['url'],  # type: ignore
            resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        )
        query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, query, headers)

    def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, iterator over elements).
        page = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
        elements = page.value
        if custom_cls:
            elements = custom_cls(elements)
        return page.next_link or None, iter(elements)

    def fetch_page(next_link=None):
        # Run one request through the pipeline, raising on non-200 responses.
        pipeline_response = self._client._pipeline.run(build_request(next_link), stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(fetch_page, extract_data)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
def list_all(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ExpressRouteCircuitListResult"]
    """Gets all the express route circuits in a subscription.
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ExpressRouteCircuitListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCircuitListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the status-code -> exception mapping.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-12-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build the request for one page. First page expands the URL
        # template; subsequent pages follow the server's next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_all.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    def extract_data(pipeline_response):
        # Deserialize a page and return (next_link, iterator over elements).
        deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)
    def get_next(next_link=None):
        # Run one page request through the pipeline, raising on non-200.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'}  # type: ignore
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
import re
from flexget import plugin
from flexget import validator
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode, clean_title
from flexget.utils.requests import TimedLimiter
from flexget.utils.tools import parse_filesize
# Module-level logger; 'search_sceneaccess' is the plugin's historical logger name.
log = logging.getLogger('search_sceneaccess')
# Mapping of SceneAccess site sections ("scopes") to {category name: site id}.
# The ids are the ones the site uses in its ?c<id>=<id> query-string filters;
# the scope key is the relative URL path of the section (except 'mp3/0day',
# which lives under /spam — see SceneAccessSearch.process_categories).
CATEGORIES = {
    'browse':
        {
            'Movies/DVD-R': 8,
            'Movies/x264': 22,
            'Movies/XviD': 7,
            'TV/HD-x264': 27,
            'TV/SD-x264': 17,
            'TV/XviD': 11,
            'Games/PC': 3,
            'Games/PS3': 5,
            'Games/PSP': 20,
            'Games/WII': 28,
            'Games/XBOX360': 23,
            'APPS/ISO': 1,
            'DOX': 14,
            'MISC': 21
        },
    'nonscene':
        {
            'Movies/HD-x264': 41,
            'Movies/SD-x264': 42,
            'Movies/XviD': 43,
            'TV/HD': 44,
            'TV/SD': 45
        },
    'mp3/0day':
        {
            '0DAY/APPS': 2,
            'FLAC': 40,
            'MP3': 13,
            'MVID': 15,
        },
    'archive':
        {
            'Movies/Packs': 4,
            'TV/Packs': 26,
            'Games/Packs': 29,
            'XXX/Packs': 37,
            'Music/Packs': 38
        },
    'foreign':
        {
            'Movies/DVD-R': 31,
            'Movies/x264': 32,
            'Movies/XviD': 30,
            'TV/x264': 34,
            'TV/XviD': 33,
        },
    'xxx':
        {
            'XXX/XviD': 12,
            'XXX/x264': 35,
            'XXX/0DAY': 36
        }
}
# Base URL of the tracker; relative paths (login, browse, torrent links) are
# concatenated onto this.
URL = 'https://sceneaccess.eu/'
class SceneAccessSearch(object):
    """ Scene Access Search plugin

    == Basic usage:

    sceneaccess:
        username: XXXX (required)
        password: XXXX (required)
        category: Movies/x264 (optional)
        gravity_multiplier: 200 (optional)

    == Categories:
    +---------------+----------------+-----------+--------------+--------------+----------+
    | browse        | nonscene       | mp3/0day  | archive      | foreign      | xxx      |
    +---------------+----------------+-----------+--------------+--------------+----------+
    | APPS/ISO      | Movies/HD-x264 | 0DAY/APPS | Games/Packs  | Movies/DVD-R | XXX/0DAY |
    | DOX           | Movies/SD-x264 | FLAC      | Movies/Packs | Movies/x264  | XXX/x264 |
    | Games/PC      | Movies/XviD    | MP3       | Music/Packs  | Movies/XviD  | XXX/XviD |
    | Games/PS3     | TV/HD          | MVID      | TV/Packs     | TV/x264      |          |
    | Games/PSP     | TV/SD          |           | XXX/Packs    | TV/XviD      |          |
    | Games/WII     |                |           |              |              |          |
    | Games/XBOX360 |                |           |              |              |          |
    | MISC          |                |           |              |              |          |
    | Movies/DVD-R  |                |           |              |              |          |
    | Movies/x264   |                |           |              |              |          |
    | Movies/XviD   |                |           |              |              |          |
    | TV/HD-x264    |                |           |              |              |          |
    | TV/SD-x264    |                |           |              |              |          |
    | TV/XviD       |                |           |              |              |          |
    +---------------+----------------+-----------+--------------+--------------+----------+

    You can combine the categories almost any way you want, here are some examples:

    category:
      archive: yes          => Will search all categories within archive section

    category: Movies/x264   => Search Movies/x264 within 'browse' section (browse is always default if unspecified)

    category:
      browse:
        - 22                => This is custom category ID
        - Movies/XviD
      foreign:
        - Movies/x264
        - Movies/XviD

    Specifying specific category ID is also possible, you can extract ID from URL, for example
    if you hover or click on category on the site you'll see similar address:

    http://sceneaccess.URL/browse?cat=22

    In this example, according to this bit ?cat=22 , category id is 22.

    == Priority

    gravity_multiplier is optional parameter that increases odds of downloading found matches from sceneaccess
    instead of other search providers, that may have higer odds due to their higher number of peers.
    Although sceneaccess does not have many peers as some public trackers, the torrents are usually faster.
    By default, Flexget give higher priority to found matches according to following formula:

    gravity = number of seeds * 2 + number of leechers
    gravity_multiplier will multiply the above number by specified amount.

    If you use public trackers for searches, you may want to use this feature.
    """

    def validator(self):
        """Return config validator."""
        root = validator.factory('dict')
        root.accept('text', key='username', required=True)
        root.accept('text', key='password', required=True)
        root.accept('number', key='gravity_multiplier')
        # Scope as in pages like `browse`, `mp3/0day`, `foreign`, etc.
        # Will only accept categories from `browse` which it will default to, unless user
        # specifies other scopes via dict.
        root.accept('choice', key='category').accept_choices(CATEGORIES['browse'])
        root.accept('number', key='category')
        categories = root.accept('dict', key='category')
        category_list = root.accept('list', key='category')
        category_list.accept('choice').accept_choices(CATEGORIES['browse'])
        for category in CATEGORIES:
            # Each scope accepts a named category, a boolean (whole scope), a raw
            # category id, or a list mixing names and ids.
            categories.accept('choice', key=category).accept_choices(CATEGORIES[category])
            categories.accept('boolean', key=category)
            categories.accept('number', key=category)
            category_list = categories.accept('list', key=category)
            category_list.accept('choice', key=category).accept_choices(CATEGORIES[category])
            category_list.accept('number', key=category)
        return root

    def process_categories(self, config):
        """
        sceneaccess use different url for different supercategories (let's call them scopes)
        For example, most categories (international tv, movies, games, ...) reside within `browse` with url
        www.sceneaccess.eu/browse or mp3 and 0day releases have their own scope called `mp3/0day` but their url is
        www.sceneaccess.eu/spam
        this method iterates over all possible combinations and returns a list of dicts that contain both relative link
        to scope as well as url fragments of categories that main method search() will use:
        Return example:
        {'url_path': 'spam',
         'category_url_string': '&c40=40&c13=13'
        }
        """
        to_process = dict()
        scope = 'browse'  # Default scope to search in
        category = config.get('category')
        if category:
            if isinstance(category, dict):  # Categories have search scope specified.
                for scope in category:
                    if isinstance(category[scope], bool):  # If provided boolean, search all categories within
                        category[scope] = []               # the scope.
                    elif not isinstance(category[scope], list):  # or convert single category into list
                        category[scope] = [category[scope]]
                    to_process[scope] = category[scope]
            else:  # Will default to `browse` scope, because no scope was specified (only category)
                category = [category]
                to_process[scope] = category
        else:  # Category was not set, will default to all categories within `browse` scope.
            to_process[scope] = []
        ret = list()
        for scope, categories in to_process.items():
            cat_id_list = list()
            for category in categories:
                try:
                    cat_id = CATEGORIES[scope][category]
                except KeyError:  # User provided category id directly
                    cat_id = category
                if isinstance(cat_id, list):
                    cat_id_list.extend(cat_id)
                else:
                    cat_id_list.append(cat_id)
            if scope == 'mp3/0day':  # mp3/0day is actually /spam?search= in URL, can safely change it now
                scope = 'spam'
            category_url_string = ''.join(['&c' + str(x) + '=' + str(x) for x in cat_id_list])  # &c<id>=<id>&...
            ret.append({'url_path': scope, 'category_url_string': category_url_string})
        return ret

    @plugin.internet(log)
    def search(self, task, entry, config=None):
        """
        Search for entries on SceneAccess.

        Logs in (once per session), builds one search URL per configured scope,
        scrapes the result rows and returns them as a set of new Entry objects.
        """
        session = task.requests
        if 'sceneaccess.eu' not in session.domain_limiters:
            session.add_domain_limiter(TimedLimiter('sceneaccess.eu', '7 seconds'))
        if not session.cookies:
            # Lazy %-style args instead of eager string formatting.
            log.debug('Logging in to %s...', URL)
            params = {'username': config['username'],
                      'password': config['password'],
                      'submit': 'come on in'}
            session.post(URL + 'login', data=params)
        # Optional boost applied to search_sort so these results can outrank
        # trackers with more peers; defaults to no boost.
        multip = config.get('gravity_multiplier', 1)
        # Prepare queries...
        base_urls = list()
        entries = set()
        for category in self.process_categories(config):
            base_urls.append(URL + '%(url_path)s?method=2%(category_url_string)s' % category)
        # Search...
        for search_string in entry.get('search_strings', [entry['title']]):
            search_string_normalized = normalize_unicode(clean_title(search_string))
            search_string_url_fragment = '&search=' + quote(search_string_normalized.encode('utf8'))
            for base_url in base_urls:
                url = base_url + search_string_url_fragment
                log.debug('Search URL for `%s`: %s', search_string, url)
                page = session.get(url).content
                soup = get_soup(page)
                for result in soup.findAll('tr', attrs={'class': 'tt_row'}):
                    # Use a fresh name here; the original rebound the `entry`
                    # parameter, shadowing the task entry being searched for.
                    new_entry = Entry()
                    new_entry['title'] = result.find('a', href=re.compile(r'details\?id=\d+'))['title']
                    # Raw string and escaped dot: match literal '.torrent' suffix.
                    new_entry['url'] = URL + result.find('a', href=re.compile(r'\.torrent$'))['href']
                    new_entry['torrent_seeds'] = result.find('td', attrs={'class': 'ttr_seeders'}).text
                    new_entry['torrent_leeches'] = result.find('td', attrs={'class': 'ttr_leechers'}).text
                    new_entry['search_sort'] = torrent_availability(new_entry['torrent_seeds'],
                                                                   new_entry['torrent_leeches']) * multip
                    size_text = result.find('td', attrs={'class': 'ttr_size'}).text
                    # Raw-string pattern (the original '\d'/'\s' escapes raise
                    # DeprecationWarning) and a None-guard: previously an
                    # unparseable size cell crashed with AttributeError.
                    size_match = re.search(r'(\d+(?:[.,]\d+)*)\s?([KMG]B)', size_text)
                    if size_match:
                        new_entry['content_size'] = parse_filesize(size_match.group(0))
                    else:
                        log.debug('Could not parse size from `%s`', size_text)
                    entries.add(new_entry)
        return entries
@event('plugin.register')
def register_plugin():
    """Register the SceneAccess search plugin with FlexGet."""
    plugin.register(SceneAccessSearch, 'sceneaccess', api_ver=2, interfaces=['search'])
| |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import unittest
from functools import partial
from textwrap import dedent
from typing import Dict, List, Optional
from pants.base.build_environment import get_buildroot
from pants.option.option_value_container import OptionValueContainer
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.scope import ScopeInfo
from pants.util.contextutil import temporary_dir, temporary_file, temporary_file_path
class OptionsBootstrapperTest(unittest.TestCase):
    """Tests for OptionsBootstrapper: parsing of bootstrap options from config
    files, environment variables, and command-line args, including their
    precedence (args > env > config) and caching behavior."""

    def _config_path(self, path: Optional[str]) -> List[str]:
        """Return a --pants-config-files flag pointing at `path` (no files if None)."""
        if path is None:
            return ["--pants-config-files=[]"]
        return [f"--pants-config-files=['{path}']"]

    def assert_bootstrap_options(
        self,
        *,
        config: Optional[Dict[str, str]] = None,
        env: Optional[Dict[str, str]] = None,
        args: Optional[List[str]] = None,
        **expected_entries,
    ) -> None:
        """Bootstrap with the given config/env/args and assert that each key in
        `expected_entries` has the expected value in the global scope."""
        with temporary_file(binary_mode=False) as fp:
            fp.write("[DEFAULT]\n")
            if config:
                for k, v in config.items():
                    fp.write(f"{k}: {v}\n")
            # Close (flush) so the bootstrapper can read the config file.
            fp.close()
            args = [*self._config_path(fp.name), *(args or [])]
            bootstrapper = OptionsBootstrapper.create(env=env or {}, args=args)
            vals = bootstrapper.get_bootstrap_options().for_global_scope()
            vals_dict = {k: getattr(vals, k) for k in expected_entries}
            self.assertEqual(expected_entries, vals_dict)

    def test_bootstrap_seed_values(self) -> None:
        """Seed dirs (workdir/supportdir/distdir) come from config, env, or args."""

        def assert_seed_values(
            *,
            config: Optional[Dict[str, str]] = None,
            env: Optional[Dict[str, str]] = None,
            args: Optional[List[str]] = None,
            workdir: Optional[str] = None,
            supportdir: Optional[str] = None,
            distdir: Optional[str] = None,
        ) -> None:
            # Unspecified expectations fall back to the buildroot-relative defaults.
            self.assert_bootstrap_options(
                config=config,
                env=env,
                args=args,
                pants_workdir=workdir or os.path.join(get_buildroot(), ".pants.d"),
                pants_supportdir=supportdir or os.path.join(get_buildroot(), "build-support"),
                pants_distdir=distdir or os.path.join(get_buildroot(), "dist"),
            )

        # Check for valid default seed values
        assert_seed_values()

        # Check getting values from config, env and args.
        assert_seed_values(
            config={"pants_workdir": "/from_config/.pants.d"}, workdir="/from_config/.pants.d",
        )
        assert_seed_values(
            env={"PANTS_SUPPORTDIR": "/from_env/build-support"},
            supportdir="/from_env/build-support",
        )
        assert_seed_values(args=["--pants-distdir=/from_args/dist"], distdir="/from_args/dist")

        # Check that args > env > config.
        assert_seed_values(
            config={
                "pants_workdir": "/from_config/.pants.d",
                "pants_supportdir": "/from_config/build-support",
                "pants_distdir": "/from_config/dist",
            },
            env={"PANTS_SUPPORTDIR": "/from_env/build-support", "PANTS_DISTDIR": "/from_env/dist",},
            args=["--pants-distdir=/from_args/dist"],
            workdir="/from_config/.pants.d",
            supportdir="/from_env/build-support",
            distdir="/from_args/dist",
        )

        # Check that unrelated args and config don't confuse us.
        assert_seed_values(
            config={
                "pants_workdir": "/from_config/.pants.d",
                "pants_supportdir": "/from_config/build-support",
                "pants_distdir": "/from_config/dist",
                "unrelated": "foo",
            },
            env={
                "PANTS_SUPPORTDIR": "/from_env/build-support",
                "PANTS_DISTDIR": "/from_env/dist",
                "PANTS_NO_RELATIONSHIP": "foo",
            },
            args=["--pants-distdir=/from_args/dist", "--foo=bar", "--baz"],
            workdir="/from_config/.pants.d",
            supportdir="/from_env/build-support",
            distdir="/from_args/dist",
        )

    def test_bootstrap_bool_option_values(self) -> None:
        """Boolean bootstrap options can be negated via args, config, or env."""
        # Check the default.
        self.assert_bootstrap_options(pantsrc=True)

        assert_pantsrc_is_false = partial(self.assert_bootstrap_options, pantsrc=False)
        assert_pantsrc_is_false(args=["--no-pantsrc"])
        assert_pantsrc_is_false(config={"pantsrc": False})
        assert_pantsrc_is_false(env={"PANTS_PANTSRC": "False"})

    def test_create_bootstrapped_options(self) -> None:
        """Bootstrap options set via flags/env interpolate into regular config."""
        # Check that we can set a bootstrap option from a cmd-line flag and have that interpolate
        # correctly into regular config.
        with temporary_file(binary_mode=False) as fp:
            fp.write(
                dedent(
                    """
                    [foo]
                    bar: %(pants_workdir)s/baz
                    [fruit]
                    apple: %(pants_supportdir)s/banana
                    """
                )
            )
            fp.close()
            args = ["--pants-workdir=/qux"] + self._config_path(fp.name)
            bootstrapper = OptionsBootstrapper.create(env={"PANTS_SUPPORTDIR": "/pear"}, args=args)
            opts = bootstrapper.get_full_options(
                known_scope_infos=[
                    ScopeInfo("", ScopeInfo.GLOBAL),
                    ScopeInfo("foo", ScopeInfo.TASK),
                    ScopeInfo("fruit", ScopeInfo.TASK),
                ]
            )
            # So we don't choke on these on the cmd line.
            opts.register("", "--pants-workdir")
            opts.register("", "--pants-config-files")
            opts.register("foo", "--bar")
            opts.register("fruit", "--apple")
            self.assertEqual("/qux/baz", opts.for_scope("foo").bar)
            self.assertEqual("/pear/banana", opts.for_scope("fruit").apple)

    def test_bootstrapped_options_ignore_irrelevant_env(self) -> None:
        """Only PANTS_* environment variables are captured by the bootstrapper."""
        included = "PANTS_SUPPORTDIR"
        excluded = "NON_PANTS_ENV"
        bootstrapper = OptionsBootstrapper.create(env={excluded: "pear", included: "banana",})
        self.assertIn(included, bootstrapper.env)
        self.assertNotIn(excluded, bootstrapper.env)

    def test_create_bootstrapped_multiple_pants_config_files(self) -> None:
        """When given multiple config files, the later files should take precedence when options
        conflict."""

        def create_options_bootstrapper(*config_paths: str) -> OptionsBootstrapper:
            # One --pants-config-files flag per path, in order.
            return OptionsBootstrapper.create(
                args=[f"--pants-config-files={cp}" for cp in config_paths]
            )

        def assert_config_read_correctly(
            options_bootstrapper: OptionsBootstrapper, *, expected_worker_count: int,
        ) -> None:
            options = options_bootstrapper.get_full_options(
                known_scope_infos=[
                    ScopeInfo("", ScopeInfo.GLOBAL),
                    ScopeInfo("compile.apt", ScopeInfo.TASK),
                    ScopeInfo("fruit", ScopeInfo.TASK),
                ],
            )
            # So we don't choke on these on the cmd line.
            options.register("", "--pants-config-files", type=list)
            options.register("", "--config-override", type=list)
            options.register("compile.apt", "--worker-count")
            options.register("fruit", "--apple")
            self.assertEqual(
                str(expected_worker_count), options.for_scope("compile.apt").worker_count
            )
            self.assertEqual("red", options.for_scope("fruit").apple)

        with temporary_file(binary_mode=False) as fp1, temporary_file(binary_mode=False) as fp2:
            fp1.write(
                dedent(
                    """\
                    [compile.apt]
                    worker_count: 1
                    [fruit]
                    apple: red
                    """
                )
            )
            fp2.write(
                dedent(
                    """\
                    [compile.apt]
                    worker_count: 2
                    """
                )
            )
            fp1.close()
            fp2.close()
            assert_config_read_correctly(
                create_options_bootstrapper(fp1.name), expected_worker_count=1,
            )
            assert_config_read_correctly(
                create_options_bootstrapper(fp1.name, fp2.name), expected_worker_count=2,
            )
            assert_config_read_correctly(
                create_options_bootstrapper(fp2.name, fp1.name), expected_worker_count=1,
            )

    def test_options_pantsrc_files(self) -> None:
        """Files passed via --pantsrc-files are read as additional config."""

        def create_options_bootstrapper(*config_paths: str) -> OptionsBootstrapper:
            return OptionsBootstrapper.create(args=[f"--pantsrc-files={cp}" for cp in config_paths])

        with temporary_file(binary_mode=False) as fp:
            fp.write(
                dedent(
                    """
                    [resolver]
                    resolver: coursier
                    """
                )
            )
            fp.close()
            bootstrapped_options = create_options_bootstrapper(fp.name)
            opts_single_config = bootstrapped_options.get_full_options(
                known_scope_infos=[
                    ScopeInfo("", ScopeInfo.GLOBAL),
                    ScopeInfo("resolver", ScopeInfo.TASK),
                ]
            )
            opts_single_config.register("", "--pantsrc-files", type=list)
            opts_single_config.register("resolver", "--resolver")
            self.assertEqual("coursier", opts_single_config.for_scope("resolver").resolver)

    def test_full_options_caching(self) -> None:
        """get_full_options caches on the *set* of known scopes (order- and
        duplicate-insensitive); a different scope set yields a new instance."""
        with temporary_file_path() as config:
            args = self._config_path(config)
            bootstrapper = OptionsBootstrapper.create(env={}, args=args)

            opts1 = bootstrapper.get_full_options(
                known_scope_infos=[
                    ScopeInfo("", ScopeInfo.GLOBAL),
                    ScopeInfo("foo", ScopeInfo.TASK),
                ]
            )
            opts2 = bootstrapper.get_full_options(
                known_scope_infos=[
                    ScopeInfo("foo", ScopeInfo.TASK),
                    ScopeInfo("", ScopeInfo.GLOBAL),
                ]
            )
            # Same scopes, different order -> cache hit.
            assert opts1 is opts2

            opts3 = bootstrapper.get_full_options(
                known_scope_infos=[
                    ScopeInfo("", ScopeInfo.GLOBAL),
                    ScopeInfo("foo", ScopeInfo.TASK),
                    ScopeInfo("", ScopeInfo.GLOBAL),
                ]
            )
            # Duplicated scope -> still the same scope set -> cache hit.
            assert opts1 is opts3

            opts4 = bootstrapper.get_full_options(
                known_scope_infos=[ScopeInfo("", ScopeInfo.GLOBAL)]
            )
            # Smaller scope set -> different cached instance.
            assert opts1 is not opts4

            opts5 = bootstrapper.get_full_options(
                known_scope_infos=[ScopeInfo("", ScopeInfo.GLOBAL)]
            )
            assert opts4 is opts5
            assert opts1 is not opts5

    def test_bootstrap_short_options(self) -> None:
        """Known short flags (-d, -l) are parsed; unknown short flags are ignored."""

        def parse_options(*args: str) -> OptionValueContainer:
            full_args = [*args, *self._config_path(None)]
            return (
                OptionsBootstrapper.create(args=full_args)
                .get_bootstrap_options()
                .for_global_scope()
            )

        # No short options passed - defaults presented.
        vals = parse_options()
        self.assertIsNone(vals.logdir)
        self.assertEqual("info", vals.level)

        # Unrecognized short options passed and ignored - defaults presented.
        vals = parse_options("-_UnderscoreValue", "-^")
        self.assertIsNone(vals.logdir)
        self.assertEqual("info", vals.level)

        vals = parse_options("-d/tmp/logs", "-ldebug")
        self.assertEqual("/tmp/logs", vals.logdir)
        self.assertEqual("debug", vals.level)

    def test_bootstrap_options_passthrough_dup_ignored(self) -> None:
        """Args after the `--` passthrough separator are not parsed as bootstrap options."""

        def parse_options(*args: str) -> OptionValueContainer:
            full_args = [*args, *self._config_path(None)]
            return (
                OptionsBootstrapper.create(args=full_args)
                .get_bootstrap_options()
                .for_global_scope()
            )

        vals = parse_options("main", "args", "-d/tmp/frogs", "--", "-d/tmp/logs")
        # The pre-separator value wins; the post-separator duplicate is ignored.
        self.assertEqual("/tmp/frogs", vals.logdir)

        vals = parse_options("main", "args", "--", "-d/tmp/logs")
        self.assertIsNone(vals.logdir)

    def test_bootstrap_options_explicit_config_path(self) -> None:
        """get_config_file_paths honors flag > env, and `+[...]` appends to the default."""

        def config_path(*args, **env):
            return OptionsBootstrapper.get_config_file_paths(env, args)

        self.assertEqual(
            ["/foo/bar/pants.toml"],
            config_path("main", "args", "--pants-config-files=['/foo/bar/pants.toml']"),
        )

        self.assertEqual(
            ["/from/env1", "/from/env2"],
            config_path("main", "args", PANTS_CONFIG_FILES="['/from/env1', '/from/env2']"),
        )

        # Flag beats env.
        self.assertEqual(
            ["/from/flag"],
            config_path(
                "main",
                "args",
                "-x",
                "--pants-config-files=['/from/flag']",
                "goal",
                "--other-flag",
                PANTS_CONFIG_FILES="['/from/env']",
            ),
        )

        # Test appending to the default.
        self.assertEqual(
            [f"{get_buildroot()}/pants.toml", "/from/env", "/from/flag"],
            config_path(
                "main",
                "args",
                "-x",
                "--pants-config-files=+['/from/flag']",
                "goal",
                "--other-flag",
                PANTS_CONFIG_FILES="+['/from/env']",
            ),
        )

        # Test replacing the default, then appending.
        self.assertEqual(
            ["/from/env", "/from/flag"],
            config_path(
                "main",
                "args",
                "-x",
                "--pants-config-files=+['/from/flag']",
                "goal",
                "--other-flag",
                PANTS_CONFIG_FILES="['/from/env']",
            ),
        )

        self.assertEqual(
            ["/from/flag"],
            config_path(
                "main",
                "args",
                "-x",
                "--pants-config-files=['/from/flag']",
                "goal",
                "--other-flag",
                PANTS_CONFIG_FILES="+['/from/env']",
            ),
        )

    def test_setting_pants_config_in_config(self) -> None:
        """Setting pants_config_files inside a config file must not chain-load configs."""
        # Test that setting pants_config in the config file has no effect.
        with temporary_dir() as tmpdir:
            config1 = os.path.join(tmpdir, "config1")
            config2 = os.path.join(tmpdir, "config2")
            with open(config1, "w") as out1:
                out1.write(f"[DEFAULT]\npants_config_files: ['{config2}']\nlogdir: logdir1\n")
            with open(config2, "w") as out2:
                out2.write("[DEFAULT]\nlogdir: logdir2\n")
            ob = OptionsBootstrapper.create(env={}, args=[f"--pants-config-files=['{config1}']"])
            logdir = ob.get_bootstrap_options().for_global_scope().logdir
            # config2 was never read, so config1's logdir wins.
            self.assertEqual("logdir1", logdir)
| |
import sys
import time
from django.db.backends.creation import BaseDatabaseCreation
from django.utils import six
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    """Oracle-specific creation/teardown of the test environment.

    Oracle has no lightweight CREATE DATABASE, so the test "database" is
    modelled as a dedicated tablespace pair plus a dedicated test user whose
    credentials are swapped into the connection's settings.
    """

    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.
    data_types = {
        'AutoField': 'NUMBER(11)',
        'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
        'CharField': 'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
        'DateField': 'DATE',
        'DateTimeField': 'TIMESTAMP',
        'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'NVARCHAR2(%(max_length)s)',
        'FilePathField': 'NVARCHAR2(%(max_length)s)',
        'FloatField': 'DOUBLE PRECISION',
        'IntegerField': 'NUMBER(11)',
        'BigIntegerField': 'NUMBER(19)',
        'IPAddressField': 'VARCHAR2(15)',
        'GenericIPAddressField': 'VARCHAR2(39)',
        'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
        'OneToOneField': 'NUMBER(11)',
        'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
        'SlugField': 'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField': 'NUMBER(11)',
        'TextField': 'NCLOB',
        'TimeField': 'TIMESTAMP',
        'URLField': 'VARCHAR2(%(max_length)s)',
    }

    def __init__(self, connection):
        super(DatabaseCreation, self).__init__(connection)

    def _create_test_db(self, verbosity=1, autoclobber=False):
        """Create the test tablespaces and test user, prompting (unless
        autoclobber) before destroying pre-existing ones, then switch the
        connection over to the test credentials.

        Returns the 'production' database NAME (see _get_test_db_name).
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        if self._test_database_create():
            try:
                self._execute_test_db_creation(cursor, parameters, verbosity)
            except Exception as e:
                # Creation failed, most likely because the tablespaces already
                # exist; offer to destroy and recreate them.
                sys.stderr.write("Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = six.raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test database '%s'..." % self.connection.alias)
                        self._execute_test_db_destruction(cursor, parameters, verbosity)
                        self._execute_test_db_creation(cursor, parameters, verbosity)
                    except Exception as e:
                        sys.stderr.write("Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)

        if self._test_user_create():
            if verbosity >= 1:
                print("Creating test user...")
            try:
                self._create_test_user(cursor, parameters, verbosity)
            except Exception as e:
                # Same recover-or-abort dance as above, for the test user.
                sys.stderr.write("Got an error creating the test user: %s\n" % e)
                if not autoclobber:
                    confirm = six.raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test user...")
                        self._destroy_test_user(cursor, parameters, verbosity)
                        if verbosity >= 1:
                            print("Creating test user...")
                        self._create_test_user(cursor, parameters, verbosity)
                    except Exception as e:
                        sys.stderr.write("Got an error recreating the test user: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)

        # Save the real credentials and switch the connection to the test user.
        self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
        self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
        self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
        self.connection.settings_dict['PASSWORD'] = TEST_PASSWD
        return self.connection.settings_dict['NAME']

    def _destroy_test_db(self, test_database_name, verbosity=1):
        """
        Destroy the test user and test tablespaces, restoring the connection's
        saved (real) credentials first.
        """
        TEST_NAME = self._test_database_name()
        TEST_USER = self._test_database_user()
        TEST_PASSWD = self._test_database_passwd()
        TEST_TBLSPACE = self._test_database_tblspace()
        TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()

        # Restore the credentials saved by _create_test_db.
        self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
        self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']

        parameters = {
            'dbname': TEST_NAME,
            'user': TEST_USER,
            'password': TEST_PASSWD,
            'tblspace': TEST_TBLSPACE,
            'tblspace_temp': TEST_TBLSPACE_TMP,
        }

        cursor = self.connection.cursor()
        time.sleep(1)  # To avoid "database is being accessed by other users" errors.
        if self._test_user_create():
            if verbosity >= 1:
                print('Destroying test user...')
            self._destroy_test_user(cursor, parameters, verbosity)
        if self._test_database_create():
            if verbosity >= 1:
                print('Destroying test database tables...')
            self._execute_test_db_destruction(cursor, parameters, verbosity)
        self.connection.close()

    def _execute_test_db_creation(self, cursor, parameters, verbosity):
        """Create the permanent and temporary test tablespaces."""
        if verbosity >= 2:
            print("_create_test_db(): dbname = %s" % parameters['dbname'])
        statements = [
            """CREATE TABLESPACE %(tblspace)s
            DATAFILE '%(tblspace)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
            TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
            """,
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _create_test_user(self, cursor, parameters, verbosity):
        """Create the test user in the test tablespaces and grant it access."""
        if verbosity >= 2:
            print("_create_test_user(): username = %s" % parameters['user'])
        statements = [
            """CREATE USER %(user)s
            IDENTIFIED BY %(password)s
            DEFAULT TABLESPACE %(tblspace)s
            TEMPORARY TABLESPACE %(tblspace_temp)s
            """,
            """GRANT CONNECT, RESOURCE TO %(user)s""",
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        """Drop both test tablespaces, including their contents and datafiles."""
        if verbosity >= 2:
            print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _destroy_test_user(self, cursor, parameters, verbosity):
        """Drop the test user and everything it owns."""
        if verbosity >= 2:
            print("_destroy_test_user(): user=%s" % parameters['user'])
            print("Be patient. This can take some time...")
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)

    def _execute_statements(self, cursor, statements, parameters, verbosity):
        """Interpolate `parameters` into each statement template and run it,
        logging and re-raising on failure."""
        for template in statements:
            stmt = template % parameters
            if verbosity >= 2:
                print(stmt)
            try:
                cursor.execute(stmt)
            except Exception as err:
                sys.stderr.write("Failed (%s)\n" % (err))
                raise

    def _test_database_name(self):
        """Test database name: the TEST_NAME setting, or 'test_' + NAME."""
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_NAME']:
                name = self.connection.settings_dict['TEST_NAME']
        # settings_dict is a plain dict, so a missing key raises KeyError.
        # This previously caught AttributeError, which could never fire here,
        # and is now consistent with the sibling _test_database_* helpers.
        except KeyError:
            pass
        return name

    def _test_database_create(self):
        """Whether to create the test tablespaces (TEST_CREATE, default True)."""
        return self.connection.settings_dict.get('TEST_CREATE', True)

    def _test_user_create(self):
        """Whether to create the test user (TEST_USER_CREATE, default True)."""
        return self.connection.settings_dict.get('TEST_USER_CREATE', True)

    def _test_database_user(self):
        """Test user name: the TEST_USER setting, or 'test_' + USER."""
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
        try:
            if self.connection.settings_dict['TEST_USER']:
                name = self.connection.settings_dict['TEST_USER']
        except KeyError:
            pass
        return name

    def _test_database_passwd(self):
        """Test user password: the TEST_PASSWD setting, or the module default."""
        name = PASSWORD
        try:
            if self.connection.settings_dict['TEST_PASSWD']:
                name = self.connection.settings_dict['TEST_PASSWD']
        except KeyError:
            pass
        return name

    def _test_database_tblspace(self):
        """Test tablespace name: the TEST_TBLSPACE setting, or 'test_' + NAME."""
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
        try:
            if self.connection.settings_dict['TEST_TBLSPACE']:
                name = self.connection.settings_dict['TEST_TBLSPACE']
        except KeyError:
            pass
        return name

    def _test_database_tblspace_tmp(self):
        """Temporary tablespace name: TEST_TBLSPACE_TMP, or 'test_' + NAME + '_temp'."""
        name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
        try:
            if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
                name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
        except KeyError:
            pass
        return name

    def _get_test_db_name(self):
        """
        We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a great deal in this case because DB
        names as handled by Django haven't real counterparts in Oracle.
        """
        return self.connection.settings_dict['NAME']

    def test_db_signature(self):
        """Tuple identifying this test database setup (used to detect clones)."""
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME'],
            self._test_database_user(),
        )

    def set_autocommit(self):
        """Enable autocommit on the underlying Oracle connection."""
        self.connection.connection.autocommit = True
| |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.utils import local_session, type_schema
from .core import Filter, ValueFilter, FilterValidationError
from .related import RelatedResourceFilter
class SecurityGroupFilter(RelatedResourceFilter):
    """Filter a resource by the attributes of its associated security groups."""

    RelatedResource = "c7n.resources.vpc.SecurityGroup"
    AnnotationKey = "matched-security-groups"

    schema = type_schema(
        'security-group',
        rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
class SubnetFilter(RelatedResourceFilter):
    """Filter a resource by the attributes of its associated subnets."""

    RelatedResource = "c7n.resources.vpc.Subnet"
    AnnotationKey = "matched-subnets"

    schema = type_schema(
        'subnet',
        rinherit=ValueFilter.schema,
        **{'match-resource': {'type': 'boolean'},
           'operator': {'enum': ['and', 'or']}})
class DefaultVpcBase(Filter):
    """Filter to resources in a default vpc."""

    vpcs = None
    default_vpc = None
    permissions = ('ec2:DescribeVpcs',)

    def match(self, vpc_id):
        """Return True if `vpc_id` is the account's default VPC.

        The default VPC id is looked up via DescribeVpcs once and cached on
        the filter instance for subsequent calls.
        """
        if self.default_vpc is None:
            self.log.debug("querying default vpc %s" % vpc_id)
            client = local_session(self.manager.session_factory).client('ec2')
            vpcs = [v['VpcId'] for v
                    in client.describe_vpcs()['Vpcs']
                    if v['IsDefault']]
            if vpcs:
                self.default_vpc = vpcs.pop()
        # Replaces the `and True or False` anti-idiom; the comparison is
        # already a bool.
        return vpc_id == self.default_vpc
class NetworkLocation(Filter):
    """On a network attached resource, determine intersection of
    security-group attributes, subnet attributes, and resource attributes.

    The use case is a bit specialized; for most use cases the `subnet`
    and `security-group` filters suffice. But say, for example, you wanted to
    verify that an ec2 instance was only using subnets and security groups
    with a given tag value, and that tag was not present on the resource.
    """

    schema = type_schema(
        'network-location',
        **{'missing-ok': {
            'type': 'boolean',
            'default': False,
            'description': (
                "How to handle missing keys on elements, by default this causes "
                "resources to be considered not-equal")},
           'match': {'type': 'string', 'enum': ['equal', 'non-equal'],
                     'default': 'non-equal'},
           'compare': {
               'type': 'array',
               'description': (
                   'Which elements of network location should be considered when'
                   ' matching.'),
               'default': ['resource', 'subnet', 'security-group'],
               'items': {
                   'enum': ['resource', 'subnet', 'security-group']}},
           'key': {
               'type': 'string',
               'description': 'The attribute expression that should be matched on'},
           'max-cardinality': {
               'type': 'integer', 'default': 1,
               'title': ''},
           'required': ['key']
           })
    permissions = ('ec2:DescribeSecurityGroups', 'ec2:DescribeSubnets')

    def validate(self):
        """Require that the resource type registers subnet and security-group filters."""
        rfilters = self.manager.filter_registry.keys()
        if 'subnet' not in rfilters:
            raise FilterValidationError(
                "network-location requires resource subnet filter availability")
        if 'security-group' not in rfilters:
            raise FilterValidationError(
                "network-location requires resource security-group filter availability")
        return self

    def process(self, resources, event=None):
        """Resolve related sgs/subnets and evaluate each resource's location."""
        self.sg = self.manager.filter_registry.get('security-group')({}, self.manager)
        related_sg = self.sg.get_related(resources)
        self.subnet = self.manager.filter_registry.get('subnet')({}, self.manager)
        related_subnet = self.subnet.get_related(resources)
        self.sg_model = self.manager.get_resource_manager('security-group').get_model()
        self.subnet_model = self.manager.get_resource_manager('subnet').get_model()
        self.vf = self.manager.filter_registry.get('value')({}, self.manager)

        # filter options
        key = self.data.get('key')
        self.compare = self.data.get('compare', ['subnet', 'security-group', 'resource'])
        self.max_cardinality = self.data.get('max-cardinality', 1)
        # BUG FIX: the schema enum is ('equal', 'non-equal') but the code
        # historically compared against the literal 'not-equal', so a
        # schema-valid user setting of 'non-equal' silently never matched.
        # We now store the raw value and treat anything except 'equal' as
        # non-equal mode (see process_resource), which preserves the old
        # default behavior while honoring 'non-equal'.
        self.match = self.data.get('match', 'non-equal')
        self.missing_ok = self.data.get('missing-ok', False)

        results = []
        for r in resources:
            resource_sgs = [related_sg[sid] for sid in self.sg.get_related_ids([r])]
            resource_subnets = [
                related_subnet[sid] for sid in self.subnet.get_related_ids([r])]
            found = self.process_resource(r, resource_sgs, resource_subnets, key)
            if found:
                results.append(found)

        return results

    def process_resource(self, r, resource_sgs, resource_subnets, key):
        """Evaluate one resource; return it (annotated) when it matches, else None."""
        evaluation = []

        if 'subnet' in self.compare:
            subnet_values = {
                rsub[self.subnet_model.id]: self.subnet.get_resource_value(key, rsub)
                for rsub in resource_subnets}
            if not self.missing_ok and None in subnet_values.values():
                evaluation.append({
                    'reason': 'SubnetLocationAbsent',
                    'subnets': subnet_values})
            subnet_space = set(filter(None, subnet_values.values()))
            if len(subnet_space) > self.max_cardinality:
                evaluation.append({
                    'reason': 'SubnetLocationCardinality',
                    'subnets': subnet_values})

        if 'security-group' in self.compare:
            sg_values = {
                rsg[self.sg_model.id]: self.sg.get_resource_value(key, rsg)
                for rsg in resource_sgs}
            if not self.missing_ok and None in sg_values.values():
                evaluation.append({
                    'reason': 'SecurityGroupLocationAbsent',
                    'security-groups': sg_values})
            sg_space = set(filter(None, sg_values.values()))
            if len(sg_space) > self.max_cardinality:
                evaluation.append({
                    'reason': 'SecurityGroupLocationCardinality',
                    'security-groups': sg_values})

            if ('subnet' in self.compare and
                    'security-group' in self.compare and
                    sg_space != subnet_space):
                evaluation.append({
                    'reason': 'LocationMismatch',
                    'subnets': subnet_values,
                    'security-groups': sg_values})

        if 'resource' in self.compare:
            r_value = self.vf.get_resource_value(key, r)
            if not self.missing_ok and r_value is None:
                evaluation.append({
                    'reason': 'ResourceLocationAbsent',
                    'resource': r_value})
            elif 'security-group' in self.compare and r_value not in sg_space:
                evaluation.append({
                    'reason': 'ResourceLocationMismatch',
                    'resource': r_value,
                    'security-groups': sg_values})
            elif 'subnet' in self.compare and r_value not in subnet_space:
                evaluation.append({
                    'reason': 'ResourceLocationMismatch',
                    'resource': r_value,
                    'subnet': subnet_values})

        # Any value other than 'equal' ('non-equal' per the schema, or the
        # legacy 'not-equal') selects mismatch-reporting mode.
        if evaluation and self.match != 'equal':
            r['c7n:NetworkLocation'] = evaluation
            return r
        elif not evaluation and self.match == 'equal':
            return r
| |
# Copyright 2002 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""This package implements pairwise sequence alignment using a dynamic
programming algorithm.
This provides functions to get global and local alignments between two
sequences. A global alignment finds the best concordance between all
characters in two sequences. A local alignment finds just the
subsequences that align the best.
When doing alignments, you can specify the match score and gap
penalties. The match score indicates the compatibility between an
alignment of two characters in the sequences. Highly compatible
characters should be given positive scores, and incompatible ones
should be given negative scores or 0. The gap penalties should be
negative.
The names of the alignment functions in this module follow the
convention
<alignment type>XX
where <alignment type> is either "global" or "local" and XX is a 2
character code indicating the parameters it takes. The first
character indicates the parameters for matches (and mismatches), and
the second indicates the parameters for gap penalties.
The match parameters are
CODE DESCRIPTION
x No parameters. Identical characters have score of 1, otherwise 0.
m A match score is the score of identical chars, otherwise mismatch score.
d A dictionary returns the score of any pair of characters.
c A callback function returns scores.
The gap penalty parameters are
CODE DESCRIPTION
x No gap penalties.
s Same open and extend gap penalties for both sequences.
d The sequences have different open and extend gap penalties.
c A callback function returns the gap penalties.
All the different alignment functions are contained in an object
"align". For example:
#>>> from Bio import pairwise2
#>>> alignments = pairwise2.align.globalxx("ACCGT", "ACG")
will return a list of the alignments between the two strings. The
parameters of the alignment function depends on the function called.
Some examples:
#>>> pairwise2.align.globalxx("ACCGT", "ACG")
# Find the best global alignment between the two sequences.
# Identical characters are given 1 point. No points are deducted
# for mismatches or gaps.
#>>> pairwise2.align.localxx("ACCGT", "ACG")
# Same thing as before, but with a local alignment.
#>>> pairwise2.align.globalmx("ACCGT", "ACG", 2, -1)
# Do a global alignment. Identical characters are given 2 points,
# 1 point is deducted for each non-identical character.
#>>> pairwise2.align.globalms("ACCGT", "ACG", 2, -1, -.5, -.1)
# Same as above, except now 0.5 points are deducted when opening a
# gap, and 0.1 points are deducted when extending it.
To see a description of the parameters for a function, please look at
the docstring for the function.
#>>> print pairwise2.align.localds.__doc__
localds(sequenceA, sequenceB, match_dict, open, extend) -> alignments
"""
# The alignment functions take some undocumented keyword parameters:
# - penalize_extend_when_opening: boolean
# Whether to count an extension penalty when opening a gap. If
# false, a gap of 1 is penalized with only an "open" penalty; otherwise it
# is penalized "open+extend".
# - penalize_end_gaps: boolean
# Whether to count the gaps at the ends of an alignment. By
# default, they are counted for global alignments but not for local
# ones.
# - gap_char: string
# Which character to use as a gap character in the alignment
# returned. By default, uses '-'.
# - force_generic: boolean
# Always use the generic, non-cached, dynamic programming function.
# For debugging.
# - score_only: boolean
# Only get the best score, don't recover any alignments. The return
# value of the function is the score.
# - one_alignment_only: boolean
# Only recover one alignment.
from types import *
import listfns
MAX_ALIGNMENTS = 1000 # maximum alignments recovered in traceback
class align:
"""This class provides functions that do alignments."""
class alignment_function:
"""This callable class impersonates an alignment function.
The constructor takes the name of the function. This class
will decode the name of the function to figure out how to
interpret the parameters.
"""
# match code -> tuple of (parameters, docstring)
match2args = {
'x' : ([], ''),
'm' : (['match', 'mismatch'],
"""match is the score to given to identical characters. mismatch is
the score given to non-identical ones."""),
'd' : (['match_dict'],
"""match_dict is a dictionary where the keys are tuples of pairs of
characters and the values are the scores, e.g. ("A", "C") : 2.5."""),
'c' : (['match_fn'],
"""match_fn is a callback function that takes two characters and
returns the score between them."""),
}
# penalty code -> tuple of (parameters, docstring)
penalty2args = {
'x' : ([], ''),
's' : (['open', 'extend'],
"""open and extend are the gap penalties when a gap is opened and
extended. They should be negative."""),
'd' : (['openA', 'extendA', 'openB', 'extendB'],
"""openA and extendA are the gap penalties for sequenceA, and openB
and extendB for sequeneB. The penalties should be negative."""),
'c' : (['gap_A_fn', 'gap_B_fn'],
"""gap_A_fn and gap_B_fn are callback functions that takes 1) the
index where the gap is opened, and 2) the length of the gap. They
should return a gap penalty."""),
}
def __init__(self, name):
# Check to make sure the name of the function is
# reasonable.
if name.startswith("global"):
if len(name) != 8:
raise AttributeError, "function should be globalXX"
elif name.startswith("local"):
if len(name) != 7:
raise AttributeError, "function should be localXX"
else:
raise AttributeError, name
align_type, match_type, penalty_type = \
name[:-2], name[-2], name[-1]
try:
match_args, match_doc = self.match2args[match_type]
except KeyError, x:
raise AttributeError, "unknown match type %r" % match_type
try:
penalty_args, penalty_doc = self.penalty2args[penalty_type]
except KeyError, x:
raise AttributeError, "unknown penalty type %r" % penalty_type
# Now get the names of the parameters to this function.
param_names = ['sequenceA', 'sequenceB']
param_names.extend(match_args)
param_names.extend(penalty_args)
self.function_name = name
self.align_type = align_type
self.param_names = param_names
self.__name__ = self.function_name
# Set the doc string.
doc = "%s(%s) -> alignments\n" % (
self.__name__, ', '.join(self.param_names))
if match_doc:
doc += "\n%s\n" % match_doc
if penalty_doc:
doc += "\n%s\n" % penalty_doc
doc += (
"""\nalignments is a list of tuples (seqA, seqB, score, begin, end).
seqA and seqB are strings showing the alignment between the
sequences. score is the score of the alignment. begin and end
are indexes into seqA and seqB that indicate the where the
alignment occurs.
""")
self.__doc__ = doc
def decode(self, *args, **keywds):
# Decode the arguments for the _align function. keywds
# will get passed to it, so translate the arguments to
# this function into forms appropriate for _align.
keywds = keywds.copy()
if len(args) != len(self.param_names):
raise TypeError, "%s takes exactly %d argument (%d given)" % (
self.function_name, len(self.param_names), len(args))
i = 0
while i < len(self.param_names):
if self.param_names[i] in [
'sequenceA', 'sequenceB',
'gap_A_fn', 'gap_B_fn', 'match_fn']:
keywds[self.param_names[i]] = args[i]
i += 1
elif self.param_names[i] == 'match':
assert self.param_names[i+1] == 'mismatch'
match, mismatch = args[i], args[i+1]
keywds['match_fn'] = identity_match(match, mismatch)
i += 2
elif self.param_names[i] == 'match_dict':
keywds['match_fn'] = dictionary_match(args[i])
i += 1
elif self.param_names[i] == 'open':
assert self.param_names[i+1] == 'extend'
open, extend = args[i], args[i+1]
pe = keywds.get('penalize_extend_when_opening', 0)
keywds['gap_A_fn'] = affine_penalty(open, extend, pe)
keywds['gap_B_fn'] = affine_penalty(open, extend, pe)
i += 2
elif self.param_names[i] == 'openA':
assert self.param_names[i+3] == 'extendB'
openA, extendA, openB, extendB = args[i:i+4]
pe = keywds.get('penalize_extend_when_opening', 0)
keywds['gap_A_fn'] = affine_penalty(openA, extendA, pe)
keywds['gap_B_fn'] = affine_penalty(openB, extendB, pe)
i += 4
else:
raise ValueError, "unknown parameter %r" % \
self.param_names[i]
# Here are the default parameters for _align. Assign
# these to keywds, unless already specified.
pe = keywds.get('penalize_extend_when_opening', 0)
default_params = [
('match_fn', identity_match(1, 0)),
('gap_A_fn', affine_penalty(0, 0, pe)),
('gap_B_fn', affine_penalty(0, 0, pe)),
('penalize_extend_when_opening', 0),
('penalize_end_gaps', self.align_type == 'global'),
('align_globally', self.align_type == 'global'),
('gap_char', '-'),
('force_generic', 0),
('score_only', 0),
('one_alignment_only', 0)
]
for name, default in default_params:
keywds[name] = keywds.get(name, default)
return keywds
def __call__(self, *args, **keywds):
keywds = self.decode(*args, **keywds)
return _align(**keywds)
def __getattr__(self, attr):
return self.alignment_function(attr)
align = align()
def _align(sequenceA, sequenceB, match_fn, gap_A_fn, gap_B_fn,
           penalize_extend_when_opening, penalize_end_gaps,
           align_globally, gap_char, force_generic, score_only,
           one_alignment_only):
    # Backend shared by every align.* function: build the score and
    # traceback matrices, pick the traceback starting cells, then either
    # report the best score or recover the alignment tuples.
    if not sequenceA or not sequenceB:
        # Nothing can be aligned against an empty sequence.
        return []
    if (not force_generic) and \
       type(gap_A_fn) is InstanceType and \
       gap_A_fn.__class__ is affine_penalty and \
       type(gap_B_fn) is InstanceType and \
       gap_B_fn.__class__ is affine_penalty:
        # Both gap functions are plain affine_penalty objects, so the
        # faster cached implementation applies.  (InstanceType comes
        # from 'from types import *'; Python 2 old-style instances.)
        open_A, extend_A = gap_A_fn.open, gap_A_fn.extend
        open_B, extend_B = gap_B_fn.open, gap_B_fn.extend
        x = _make_score_matrix_fast(
            sequenceA, sequenceB, match_fn, open_A, extend_A, open_B, extend_B,
            penalize_extend_when_opening, penalize_end_gaps, align_globally,
            score_only)
    else:
        x = _make_score_matrix_generic(
            sequenceA, sequenceB, match_fn, gap_A_fn, gap_B_fn,
            penalize_extend_when_opening, penalize_end_gaps, align_globally,
            score_only)
    score_matrix, trace_matrix = x
    #print "SCORE"; print_matrix(score_matrix)
    #print "TRACEBACK"; print_matrix(trace_matrix)
    # Look for the proper starting point. Get a list of all possible
    # starting points.
    starts = _find_start(
        score_matrix, sequenceA, sequenceB,
        gap_A_fn, gap_B_fn, penalize_end_gaps, align_globally)
    # Find the highest score.
    best_score = max([x[0] for x in starts])
    # If they only want the score, then return it.
    if score_only:
        return best_score
    tolerance = 0 # XXX do anything with this?
    # Now find all the positions within some tolerance of the best
    # score.  Comparison goes through rint() so floating-point noise
    # does not split equally-scoring starts.
    i = 0
    while i < len(starts):
        score, pos = starts[i]
        if rint(abs(score-best_score)) > rint(tolerance):
            del starts[i]
        else:
            i += 1
    # Recover the alignments and return them.
    x = _recover_alignments(
        sequenceA, sequenceB, starts, score_matrix, trace_matrix,
        align_globally, penalize_end_gaps, gap_char, one_alignment_only)
    return x
def _make_score_matrix_generic(
    sequenceA, sequenceB, match_fn, gap_A_fn, gap_B_fn,
    penalize_extend_when_opening, penalize_end_gaps, align_globally,
    score_only):
    # This is an implementation of the Needleman-Wunsch dynamic
    # programming algorithm for aligning sequences.  The gap penalties
    # are arbitrary callables here, so every candidate gap length must
    # be rescanned at each cell; _make_score_matrix_fast handles the
    # affine-penalty special case with caching.
    # Create the score and traceback matrices. These should be in the
    # shape:
    # sequenceA (down) x sequenceB (across)
    lenA, lenB = len(sequenceA), len(sequenceB)
    score_matrix, trace_matrix = [], []
    for i in range(lenA):
        score_matrix.append([None] * lenB)
        # Each row holds lenB references to one shared [None] list, but
        # cells are only ever reassigned (never mutated), so the
        # sharing is harmless.
        trace_matrix.append([[None]] * lenB)
    # The top and left borders of the matrices are special cases
    # because there are no previously aligned characters. To simplify
    # the main loop, handle these separately.
    for i in range(lenA):
        # Align the first residue in sequenceB to the ith residue in
        # sequence A. This is like opening up i gaps at the beginning
        # of sequence B.
        score = match_fn(sequenceA[i], sequenceB[0])
        if penalize_end_gaps:
            score += gap_B_fn(0, i)
        score_matrix[i][0] = score
    for i in range(1, lenB):
        score = match_fn(sequenceA[0], sequenceB[i])
        if penalize_end_gaps:
            score += gap_A_fn(0, i)
        score_matrix[0][i] = score
    # Fill in the score matrix. Each position in the matrix
    # represents an alignment between a character from sequenceA to
    # one in sequence B. As I iterate through the matrix, find the
    # alignment by choose the best of:
    # 1) extending a previous alignment without gaps
    # 2) adding a gap in sequenceA
    # 3) adding a gap in sequenceB
    for row in range(1, lenA):
        for col in range(1, lenB):
            # First, calculate the score that would occur by extending
            # the alignment without gaps.
            best_score = score_matrix[row-1][col-1]
            best_score_rint = rint(best_score)
            best_indexes = [(row-1, col-1)]
            # Try to find a better score by opening gaps in sequenceA.
            # Do this by checking alignments from each column in the
            # previous row. Each column represents a different
            # character to align from, and thus a different length
            # gap.
            for i in range(0, col-1):
                score = score_matrix[row-1][i] + gap_A_fn(i, col-1-i)
                score_rint = rint(score)
                # Ties at rint precision accumulate every predecessor
                # so that all optimal tracebacks can be recovered.
                if score_rint == best_score_rint:
                    best_score, best_score_rint = score, score_rint
                    best_indexes.append((row-1, i))
                elif score_rint > best_score_rint:
                    best_score, best_score_rint = score, score_rint
                    best_indexes = [(row-1, i)]
            # Try to find a better score by opening gaps in sequenceB.
            for i in range(0, row-1):
                score = score_matrix[i][col-1] + gap_B_fn(i, row-1-i)
                score_rint = rint(score)
                if score_rint == best_score_rint:
                    best_score, best_score_rint = score, score_rint
                    best_indexes.append((i, col-1))
                elif score_rint > best_score_rint:
                    best_score, best_score_rint = score, score_rint
                    best_indexes = [(i, col-1)]
            score_matrix[row][col] = best_score + \
                match_fn(sequenceA[row], sequenceB[col])
            if not align_globally and score_matrix[row][col] < 0:
                # Local (Smith-Waterman-style) alignment clips at zero.
                score_matrix[row][col] = 0
            trace_matrix[row][col] = best_indexes
    return score_matrix, trace_matrix
def _make_score_matrix_fast(
    sequenceA, sequenceB, match_fn, open_A, extend_A, open_B, extend_B,
    penalize_extend_when_opening, penalize_end_gaps,
    align_globally, score_only):
    # Needleman-Wunsch scoring specialized for affine gap penalties.
    # Instead of rescanning all previous rows/columns at every cell (as
    # the generic version must), the best gapped score per row and per
    # column is cached and extended incrementally.
    first_A_gap = calc_affine_penalty(1, open_A, extend_A,
                                      penalize_extend_when_opening)
    first_B_gap = calc_affine_penalty(1, open_B, extend_B,
                                      penalize_extend_when_opening)
    # Create the score and traceback matrices. These should be in the
    # shape:
    # sequenceA (down) x sequenceB (across)
    lenA, lenB = len(sequenceA), len(sequenceB)
    score_matrix, trace_matrix = [], []
    for i in range(lenA):
        score_matrix.append([None] * lenB)
        # Each row holds lenB references to one shared [None] list;
        # cells are only reassigned, never mutated, so this is safe.
        trace_matrix.append([[None]] * lenB)
    # The top and left borders of the matrices are special cases
    # because there are no previously aligned characters. To simplify
    # the main loop, handle these separately.
    for i in range(lenA):
        # Align the first residue in sequenceB to the ith residue in
        # sequence A. This is like opening up i gaps at the beginning
        # of sequence B.
        score = match_fn(sequenceA[i], sequenceB[0])
        if penalize_end_gaps:
            score += calc_affine_penalty(
                i, open_B, extend_B, penalize_extend_when_opening)
        score_matrix[i][0] = score
    for i in range(1, lenB):
        score = match_fn(sequenceA[0], sequenceB[i])
        if penalize_end_gaps:
            score += calc_affine_penalty(
                i, open_A, extend_A, penalize_extend_when_opening)
        score_matrix[0][i] = score
    # In the generic algorithm, at each row and column in the score
    # matrix, we had to scan all previous rows and columns to see
    # whether opening a gap might yield a higher score. Here, since
    # we know the penalties are affine, we can cache just the best
    # score in the previous rows and columns. Instead of scanning
    # through all the previous rows and cols, we can just look at the
    # cache for the best one. Whenever the row or col increments, the
    # best cached score just decreases by extending the gap longer.
    # The best score and indexes for each row (goes down all columns).
    # I don't need to store the last row because it's the end of the
    # sequence.
    row_cache_score, row_cache_index = [None]*(lenA-1), [None]*(lenA-1)
    # The best score and indexes for each column (goes across rows).
    col_cache_score, col_cache_index = [None]*(lenB-1), [None]*(lenB-1)
    for i in range(lenA-1):
        # Initialize each row to be the alignment of sequenceA[i] to
        # sequenceB[0], plus opening a gap in sequenceA.
        row_cache_score[i] = score_matrix[i][0] + first_A_gap
        row_cache_index[i] = [(i, 0)]
    for i in range(lenB-1):
        col_cache_score[i] = score_matrix[0][i] + first_B_gap
        col_cache_index[i] = [(0, i)]
    # Fill in the score_matrix.
    for row in range(1, lenA):
        for col in range(1, lenB):
            # Calculate the score that would occur by extending the
            # alignment without gaps.
            nogap_score = score_matrix[row-1][col-1]
            # Check the score that would occur if there were a gap in
            # sequence A.
            if col > 1:
                row_score = row_cache_score[row-1]
            else:
                row_score = nogap_score - 1 # Make sure it's not the best.
            # Check the score that would occur if there were a gap in
            # sequence B.
            if row > 1:
                col_score = col_cache_score[col-1]
            else:
                col_score = nogap_score - 1
            best_score = max(nogap_score, row_score, col_score)
            best_score_rint = rint(best_score)
            best_index = []
            # Ties at rint precision keep every predecessor so that all
            # optimal tracebacks can be recovered.
            if best_score_rint == rint(nogap_score):
                best_index.append((row-1, col-1))
            if best_score_rint == rint(row_score):
                best_index.extend(row_cache_index[row-1])
            if best_score_rint == rint(col_score):
                best_index.extend(col_cache_index[col-1])
            # Set the score and traceback matrices.
            score = best_score + match_fn(sequenceA[row], sequenceB[col])
            if not align_globally and score < 0:
                # Local alignment: scores are clipped at zero.
                score_matrix[row][col] = 0
            else:
                score_matrix[row][col] = score
            trace_matrix[row][col] = best_index
            # Update the cached column scores. The best score for
            # this can come from either extending the gap in the
            # previous cached score, or opening a new gap from the
            # most previously seen character. Compare the two scores
            # and keep the best one.
            open_score = score_matrix[row-1][col-1] + first_B_gap
            extend_score = col_cache_score[col-1] + extend_B
            open_score_rint, extend_score_rint = \
                rint(open_score), rint(extend_score)
            if open_score_rint > extend_score_rint:
                col_cache_score[col-1] = open_score
                col_cache_index[col-1] = [(row-1, col-1)]
            elif extend_score_rint > open_score_rint:
                col_cache_score[col-1] = extend_score
            else:
                # Tie: keep both traceback sources for this column.
                col_cache_score[col-1] = open_score
                if (row-1, col-1) not in col_cache_index[col-1]:
                    col_cache_index[col-1] = col_cache_index[col-1] + \
                        [(row-1, col-1)]
            # Update the cached row scores.
            open_score = score_matrix[row-1][col-1] + first_A_gap
            extend_score = row_cache_score[row-1] + extend_A
            open_score_rint, extend_score_rint = \
                rint(open_score), rint(extend_score)
            if open_score_rint > extend_score_rint:
                row_cache_score[row-1] = open_score
                row_cache_index[row-1] = [(row-1, col-1)]
            elif extend_score_rint > open_score_rint:
                row_cache_score[row-1] = extend_score
            else:
                # Tie: keep both traceback sources for this row.
                row_cache_score[row-1] = open_score
                if (row-1, col-1) not in row_cache_index[row-1]:
                    row_cache_index[row-1] = row_cache_index[row-1] + \
                        [(row-1, col-1)]
    return score_matrix, trace_matrix
def _recover_alignments(sequenceA, sequenceB, starts,
                        score_matrix, trace_matrix, align_globally,
                        penalize_end_gaps, gap_char, one_alignment_only):
    # Recover the alignments by following the traceback matrix. This
    # is a recursive procedure, but it's implemented here iteratively
    # with a stack.
    lenA, lenB = len(sequenceA), len(sequenceB)
    tracebacks = [] # list of (seq1, seq2, score, begin, end)
    in_process = [] # list of ([same as tracebacks], prev_pos, next_pos)
    # sequenceA and sequenceB may be sequences, including strings,
    # lists, or list-like objects. In order to preserve the type of
    # the object, we need to use slices on the sequences instead of
    # indexes. For example, sequenceA[row] may return a type that's
    # not compatible with sequenceA, e.g. if sequenceA is a list and
    # sequenceA[row] is a string. Thus, avoid using indexes and use
    # slices, e.g. sequenceA[row:row+1]. Assume that client-defined
    # sequence classes preserve these semantics.
    # Initialize the in_process stack
    for score, (row, col) in starts:
        if align_globally:
            begin, end = None, None
        else:
            # Local alignment: 'end' counts back from the end of the
            # final alignment string; a zero offset becomes None so
            # _clean_alignments treats it as "to the end".
            begin, end = None, -max(lenA-row, lenB-col)+1
            if not end:
                end = None
        # Initialize the in_process list with empty sequences of the
        # same type as sequenceA. To do this, take empty slices of
        # the sequences.
        in_process.append(
            (sequenceA[0:0], sequenceB[0:0], score, begin, end,
             (lenA, lenB), (row, col)))
        if one_alignment_only:
            break
    while in_process and len(tracebacks) < MAX_ALIGNMENTS:
        seqA, seqB, score, begin, end, prev_pos, next_pos = in_process.pop()
        prevA, prevB = prev_pos
        if next_pos is None:
            # Traceback finished: prepend the remaining unaligned heads
            # of both sequences and left-pad with gaps to equal length.
            prevlen = len(seqA)
            # add the rest of the sequences
            seqA = sequenceA[:prevA] + seqA
            seqB = sequenceB[:prevB] + seqB
            # add the rest of the gaps
            seqA, seqB = _lpad_until_equal(seqA, seqB, gap_char)
            # Now make sure begin is set.
            if begin is None:
                if align_globally:
                    begin = 0
                else:
                    begin = len(seqA) - prevlen
            tracebacks.append((seqA, seqB, score, begin, end))
        else:
            # Take one traceback step from prev_pos to next_pos,
            # prepending the consumed characters plus any gaps needed
            # to keep the two alignment strings the same length.
            nextA, nextB = next_pos
            nseqA, nseqB = prevA-nextA, prevB-nextB
            maxseq = max(nseqA, nseqB)
            ngapA, ngapB = maxseq-nseqA, maxseq-nseqB
            seqA = sequenceA[nextA:nextA+nseqA] + gap_char*ngapA + seqA
            seqB = sequenceB[nextB:nextB+nseqB] + gap_char*ngapB + seqB
            prev_pos = next_pos
            # local alignment stops early if score falls < 0
            if not align_globally and score_matrix[nextA][nextB] <= 0:
                begin = max(prevA, prevB)
                in_process.append(
                    (seqA, seqB, score, begin, end, prev_pos, None))
            else:
                # Branch: every traceback predecessor spawns its own
                # partial alignment on the stack.
                for next_pos in trace_matrix[nextA][nextB]:
                    in_process.append(
                        (seqA, seqB, score, begin, end, prev_pos, next_pos))
                    if one_alignment_only:
                        break
    return _clean_alignments(tracebacks)
def _find_start(score_matrix, sequenceA, sequenceB, gap_A_fn, gap_B_fn,
                penalize_end_gaps, align_globally):
    """Return a list of (score, (row, col)) traceback starting points."""
    if not align_globally:
        # Local alignments may begin at any cell of the matrix.
        return _find_local_start(score_matrix)
    if penalize_end_gaps:
        # Global alignment with end-gap penalties applied to the tails.
        return _find_global_start(
            sequenceA, sequenceB, score_matrix, gap_A_fn, gap_B_fn, 1)
    return _find_global_start(
        sequenceA, sequenceB, score_matrix, None, None, 0)
def _find_global_start(sequenceA, sequenceB,
score_matrix, gap_A_fn, gap_B_fn, penalize_end_gaps):
# The whole sequence should be aligned, so return the positions at
# the end of either one of the sequences.
nrows, ncols = len(score_matrix), len(score_matrix[0])
positions = []
# Search all rows in the last column.
for row in range(nrows):
# Find the score, penalizing end gaps if necessary.
score = score_matrix[row][ncols-1]
if penalize_end_gaps:
score += gap_B_fn(ncols, nrows-row-1)
positions.append((score, (row, ncols-1)))
# Search all columns in the last row.
for col in range(ncols-1):
score = score_matrix[nrows-1][col]
if penalize_end_gaps:
score += gap_A_fn(nrows, ncols-col-1)
positions.append((score, (nrows-1, col)))
return positions
def _find_local_start(score_matrix):
# Return every position in the matrix.
positions = []
nrows, ncols = len(score_matrix), len(score_matrix[0])
for row in range(nrows):
for col in range(ncols):
score = score_matrix[row][col]
positions.append((score, (row, col)))
return positions
def _clean_alignments(alignments):
    """Normalize a list of (seqA, seqB, score, begin, end) tuples.

    Duplicates are removed, 'end' is resolved to a concrete positive
    index (None means the whole length; negative counts from the end),
    and alignments with an empty span are dropped.
    """
    alignments = listfns.items(alignments) # Get rid of duplicates
    cleaned = []
    for seqA, seqB, score, begin, end in alignments:
        if end is None: # global alignment
            end = len(seqA)
        elif end < 0:
            end = end + len(seqA)
        # Keep only alignments that actually cover something.
        if begin < end:
            cleaned.append((seqA, seqB, score, begin, end))
    return cleaned
def _pad_until_equal(s1, s2, char):
# Add char to the end of s1 or s2 until they are equal length.
ls1, ls2 = len(s1), len(s2)
if ls1 < ls2:
s1 = _pad(s1, char, ls2-ls1)
elif ls2 < ls1:
s2 = _pad(s2, char, ls1-ls2)
return s1, s2
def _lpad_until_equal(s1, s2, char):
# Add char to the beginning of s1 or s2 until they are equal
# length.
ls1, ls2 = len(s1), len(s2)
if ls1 < ls2:
s1 = _lpad(s1, char, ls2-ls1)
elif ls2 < ls1:
s2 = _lpad(s2, char, ls1-ls2)
return s1, s2
def _pad(s, char, n):
# Append n chars to the end of s.
return s + char*n
def _lpad(s, char, n):
# Prepend n chars to the beginning of s.
return char*n + s
# Fixed-point scale used when comparing floating-point scores.
_PRECISION = 1000

def rint(x, precision=_PRECISION):
    """Round x to the nearest 1/precision as an integer.

    Scores are compared through rint() so that tiny floating-point
    differences do not make nearly-equal scores compare unequal.

    Uses floor(x * precision + 0.5) (round-half-up) so that negative
    values round the same way positive ones do.  The previous
    int(x * precision + 0.5) truncated toward zero, which left negative
    scores off by one from round-to-nearest (e.g. -1.5 -> -1499 instead
    of -1500 at the default precision).
    """
    return int(math.floor(x * precision + 0.5))
class identity_match:
    """identity_match([match][, mismatch]) -> match_fn

    Callable match function for alignments: comparing two residues
    yields the 'match' score when they are equal and the 'mismatch'
    score otherwise.  Defaults are match=1, mismatch=0.
    """
    def __init__(self, match=1, mismatch=0):
        # Scores handed back by __call__.
        self.match = match
        self.mismatch = mismatch
    def __call__(self, charA, charB):
        if charA != charB:
            return self.mismatch
        return self.match
class dictionary_match:
    """dictionary_match(score_dict[, symmetric]) -> match_fn

    Create a match function for use in an alignment.  score_dict is a
    dictionary where the keys are tuples (residue 1, residue 2) and
    the values are the match scores between those residues.  symmetric
    is a flag that indicates whether the scores are symmetric.  If
    true (the default), a missing (res 1, res 2) key falls back to the
    score stored under (res 2, res 1).
    """
    def __init__(self, score_dict, symmetric=1):
        self.score_dict = score_dict
        self.symmetric = symmetric
    def __call__(self, charA, charB):
        # Use the 'in' operator instead of dict.has_key(): identical
        # semantics, but has_key() is deprecated and removed in
        # Python 3.
        if self.symmetric and (charA, charB) not in self.score_dict:
            # If the score dictionary is symmetric, then look up the
            # score both ways.
            charB, charA = charA, charB
        # Raises KeyError if neither orientation is present.
        return self.score_dict[(charA, charB)]
class affine_penalty:
    """affine_penalty(open, extend[, penalize_extend_when_opening]) -> gap_fn

    Create a gap function for use in an alignment: opening a gap costs
    'open' and each extension costs 'extend'.  Both penalties must be
    non-positive.
    """
    def __init__(self, open, extend, penalize_extend_when_opening=0):
        if open > 0 or extend > 0:
            # Parenthesized raise works in both Python 2 and 3, unlike
            # the old 'raise E, msg' statement form.
            raise ValueError("Gap penalties should be non-positive.")
        self.open, self.extend = open, extend
        self.penalize_extend_when_opening = penalize_extend_when_opening
    def __call__(self, index, length):
        # 'index' (where the gap opens) is ignored: an affine penalty
        # depends only on the length of the gap.
        return calc_affine_penalty(
            length, self.open, self.extend, self.penalize_extend_when_opening)
def calc_affine_penalty(length, open, extend, penalize_extend_when_opening):
    """Return the affine penalty for a gap of the given length.

    A gap of zero (or negative) length costs nothing.  Otherwise the
    cost is 'open' plus 'extend' per extension; when
    penalize_extend_when_opening is false, the opening position is not
    also charged an extension.
    """
    if length <= 0:
        return 0
    if penalize_extend_when_opening:
        num_extensions = length
    else:
        num_extensions = length - 1
    return open + extend * num_extensions
def print_matrix(matrix):
    """print_matrix(matrix)
    Print out a matrix. For debugging purposes.
    """
    # Transpose the matrix and get the length of the values in each column.
    matrixT = [[] for x in range(len(matrix[0]))]
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            matrixT[j].append(len(str(matrix[i][j])))
    # Widest entry in each column determines that column's field width.
    # (Python 2 map() returns a list, so ndigits is indexable below.)
    ndigits = map(max, matrixT)
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            n = ndigits[j]
            # Right-justify each cell to the column width; the trailing
            # comma keeps the row on one line (Python 2 print statement).
            print "%*s " % (n, matrix[i][j]),
        print
def format_alignment(align1, align2, score, begin, end):
    """format_alignment(align1, align2, score, begin, end) -> string
    Format the alignment prettily into a string.
    """
    # Pipes mark the aligned region [begin, end); leading blanks line
    # the markers up under the first sequence.
    markers = " " * begin + "|" * (end - begin)
    lines = [
        "%s\n" % align1,
        "%s\n" % markers,
        "%s\n" % align2,
        "  Score=%g\n" % score,
    ]
    return ''.join(lines)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Spotify AB
from __future__ import absolute_import, division, print_function
import re
import attr
from six import iteritems, iterkeys, itervalues
from .config import MEDIA_TYPES
from .errors import InvalidRAMLError
from .parameters import (
Documentation, Header, Body, Response, URIParameter, QueryParameter,
FormParameter, SecurityScheme
)
from .parser_utils import (
security_schemes
)
from .raml import RootNode, ResourceNode, ResourceTypeNode, TraitNode
from .utils import (
load_schema, _resource_type_lookup,
_get_resource_type, _get_trait, _get_attribute,
_get_inherited_attribute, _remove_duplicates, _create_uri_params,
_get, _create_base_param_obj, _get_res_type_attribute,
_get_inherited_type_params, _get_inherited_item, _get_attribute_dict,
get_inherited, set_param_object, set_params, _get_data_union,
_preserve_uri_order
)
__all__ = ["parse_raml"]
def parse_raml(loaded_raml, config):
    """
    Parse loaded RAML file into RAML/Python objects.
    :param RAMLDict loaded_raml: OrderedDict of loaded RAML file
    :returns: :py:class:`.raml.RootNode` object.
    :raises: :py:class:`.errors.InvalidRAMLError` when RAML file is invalid
    """
    should_validate = str(_get(config, "validate")).lower() == 'true'
    # Build the root with validators switched off; running them while
    # the tree is only half-built would produce duplicate validation
    # exceptions.
    attr.set_run_validators(False)
    root = create_root(loaded_raml, config)
    attr.set_run_validators(should_validate)
    # Attach the second-level nodes to the root.
    root.security_schemes = create_sec_schemes(root.raml_obj, root)
    root.traits = create_traits(root.raml_obj, root)
    root.resource_types = create_resource_types(root.raml_obj, root)
    root.resources = create_resources(root.raml_obj, [], root,
                                      parent=None)
    if not should_validate:
        return root
    attr.validate(root)  # the root node itself still needs validating
    if root.errors:
        raise InvalidRAMLError(root.errors)
    return root
def create_root(raml, config):
    """
    Creates a Root Node based off of the RAML's root section.
    :param RAMLDict raml: loaded RAML file
    :returns: :py:class:`.raml.RootNode` object with API root attributes set
    """
    # Problems found while building are collected here and attached to
    # the RootNode instead of being raised immediately.
    errors = []
    def protocols():
        # Explicitly declared protocols win; otherwise infer them from
        # the scheme of the (version-interpolated) base URI.
        explicit_protos = _get(raml, "protocols")
        implicit_protos = re.findall(r"(https|http)", base_uri())
        implicit_protos = [p.upper() for p in implicit_protos]
        return explicit_protos or implicit_protos or None
    def base_uri():
        base_uri = _get(raml, "baseUri", "")
        if "{version}" in base_uri:
            # Interpolate the API version into the URI template.
            base_uri = base_uri.replace("{version}",
                                        str(_get(raml, "version")))
        return base_uri
    def base_uri_params():
        # Parameters declared for the base URI, returned in the order
        # they actually appear in the URI string.
        data = _get(raml, "baseUriParameters", {})
        params = _create_base_param_obj(data, URIParameter, config, errors)
        uri = _get(raml, "baseUri", "")
        declared = _get(raml, "uriParameters", {})
        declared = list(iterkeys(declared))
        return _preserve_uri_order(uri, params, config, errors, declared)
    def uri_params():
        data = _get(raml, "uriParameters", {})
        params = _create_base_param_obj(data, URIParameter, config, errors)
        uri = _get(raml, "baseUri", "")
        declared = []
        base = base_uri_params()
        if base:
            # Names already covered by baseUriParameters count as
            # declared so they are not duplicated here.
            declared = [p.name for p in base]
        return _preserve_uri_order(uri, params, config, errors, declared)
    def docs():
        d = _get(raml, "documentation", [])
        assert isinstance(d, list), "Error parsing documentation"
        docs = [Documentation(_get(i, "title"), _get(i, "content")) for i in d]
        return docs or None
    def schemas():
        _schemas = _get(raml, "schemas")
        if not _schemas:
            return None
        schemas = []
        for schema in _schemas:
            # Each entry is a single-key mapping: {name: schema body}.
            value = load_schema(list(itervalues(schema))[0])
            schemas.append({list(iterkeys(schema))[0]: value})
        return schemas or None
    return RootNode(
        raml_obj=raml,
        raw=raml,
        title=_get(raml, "title"),
        version=_get(raml, "version"),
        protocols=protocols(),
        base_uri=base_uri(),
        base_uri_params=base_uri_params(),
        uri_params=uri_params(),
        media_type=_get(raml, "mediaType"),
        documentation=docs(),
        schemas=schemas(),
        config=config,
        secured_by=_get(raml, "securedBy"),
        errors=errors
    )
def create_sec_schemes(raml_data, root):
    """
    Parse security schemes into ``SecurityScheme`` objects
    :param dict raml_data: Raw RAML data
    :param RootNode root: Root Node
    :returns: list of :py:class:`.parameters.SecurityScheme` objects
    """
    def _map_object_types(item):
        # Map a describedBy key to the helper that parses its value.
        return {
            "headers": headers,
            "body": body,
            "responses": responses,
            "queryParameters": query_params,
            "uriParameters": uri_params,
            "formParameters": form_params,
            "usage": usage,
            "mediaType": media_type,
            "protocols": protocols,
            "documentation": documentation,
        }[item]
    def headers(header_data):
        # Build Header objects from a "headers" mapping.
        _headers = []
        header_data = _get(header_data, "headers", {})
        for k, v in list(iteritems(header_data)):
            h = _create_base_param_obj({k: v},
                                       Header,
                                       root.config,
                                       root.errors)
            _headers.extend(h)
        return _headers
    def body(body_data):
        # Build Body objects, one per MIME type in the "body" mapping.
        body_data = _get(body_data, "body", {})
        _body = []
        for k, v in list(iteritems(body_data)):
            body = Body(
                mime_type=k,
                raw=v,
                schema=load_schema(_get(v, "schema")),
                example=load_schema(_get(v, "example")),
                form_params=_get(v, "formParameters"),
                config=root.config,
                errors=root.errors
            )
            _body.append(body)
        return _body
    def responses(resp_data):
        # Build Response objects, sorted by status code; each
        # response's own headers/body reuse the helpers above.
        _resps = []
        resp_data = _get(resp_data, "responses", {})
        for k, v in list(iteritems(resp_data)):
            response = Response(
                code=k,
                raw=v,
                desc=_get(v, "description"),
                headers=headers(_get(v, "headers", {})),
                body=body(_get(v, "body", {})),
                config=root.config,
                errors=root.errors
            )
            _resps.append(response)
        return sorted(_resps, key=lambda x: x.code)
    def query_params(param_data):
        param_data = _get(param_data, "queryParameters", {})
        _params = []
        for k, v in list(iteritems(param_data)):
            p = _create_base_param_obj({k: v},
                                       QueryParameter,
                                       root.config,
                                       root.errors)
            _params.extend(p)
        return _params
    def uri_params(param_data):
        # NOTE(review): unlike the other param helpers, this _get has
        # no {} default, so param_data may be None here -- confirm the
        # callers always supply a dict for "uriParameters".
        param_data = _get(param_data, "uriParameters")
        _params = []
        for k, v in list(iteritems(param_data)):
            p = _create_base_param_obj({k: v},
                                       URIParameter,
                                       root.config,
                                       root.errors)
            _params.extend(p)
        return _params
    def form_params(param_data):
        param_data = _get(param_data, "formParameters", {})
        _params = []
        for k, v in list(iteritems(param_data)):
            p = _create_base_param_obj({k: v},
                                       FormParameter,
                                       root.config,
                                       root.errors)
            _params.extend(p)
        return _params
    def usage(desc_by_data):
        return _get(desc_by_data, "usage")
    def media_type(desc_by_data):
        return _get(desc_by_data, "mediaType")
    def protocols(desc_by_data):
        return _get(desc_by_data, "protocols")
    def documentation(desc_by_data):
        d = _get(desc_by_data, "documentation", [])
        assert isinstance(d, list), "Error parsing documentation"
        docs = [Documentation(_get(i, "title"), _get(i, "content")) for i in d]
        return docs or None
    def set_property(node, obj, node_data):
        # Parse one describedBy entry and attach the result to the node
        # under the helper's own name (headers, responses, ...).
        func = _map_object_types(obj)
        item_objs = func({obj: node_data})
        setattr(node, func.__name__, item_objs)
    def initial_wrap(key, data):
        # First pass: scheme attributes taken directly from RAML data.
        return SecurityScheme(
            name=key,
            raw=data,
            type=_get(data, "type"),
            described_by=_get(data, "describedBy", {}),
            desc=_get(data, "description"),
            settings=_get(data, "settings"),
            config=root.config,
            errors=root.errors
        )
    def final_wrap(node):
        # Second pass: expand every describedBy entry into objects.
        for obj, node_data in list(iteritems(node.described_by)):
            set_property(node, obj, node_data)
        return node
    schemes = _get(raml_data, "securitySchemes", [])
    scheme_objs = []
    for s in schemes:
        # Each list entry is a single-key mapping: {name: scheme data}.
        name = list(iterkeys(s))[0]
        data = list(itervalues(s))[0]
        node = initial_wrap(name, data)
        node = final_wrap(node)
        scheme_objs.append(node)
    return scheme_objs or None
def create_traits(raml_data, root):
    """
    Parse traits into ``Trait`` objects.
    :param dict raml_data: Raw RAML data
    :param RootNode root: Root Node
    :returns: list of :py:class:`.raml.TraitNode` objects
    """
    # NOTE: the zero-argument helpers below close over ``data``, which
    # is rebound on every pass of the loop at the bottom of this
    # function (late binding); they are only valid while ``data`` holds
    # the current trait's mapping.
    def description():
        return _get(data, "description")
    def protocols():
        return _get(data, "protocols")
    def query_params():
        return set_param_object(data, "queryParameters", root)
    def uri_params():
        return set_param_object(data, "uriParameters", root)
    def form_params():
        return set_param_object(data, "formParameters", root)
    def base_uri_params():
        return set_param_object(data, "baseUriParameters", root)
    def headers(data):
        # Takes its mapping explicitly so it can also be applied to
        # individual response bodies in responses() below.
        return set_param_object(data, "headers", root)
    def body(data):
        # One Body object per MIME type in the "body" mapping.
        body = _get(data, "body", {})
        body_objects = []
        for key, value in list(iteritems(body)):
            body = Body(
                mime_type=key,
                raw=value,
                schema=load_schema(_get(value, "schema")),
                example=load_schema(_get(value, "example")),
                form_params=_get(value, "formParameters"),
                config=root.config,
                errors=root.errors
            )
            body_objects.append(body)
        return body_objects or None
    def responses():
        # Response objects sorted by status code; each response's own
        # headers/body are parsed with the helpers above.
        response_objects = []
        for key, value in list(iteritems(_get(data, "responses", {}))):
            response = Response(
                code=key,
                raw=value,
                desc=_get(value, "description"),
                headers=headers(value),
                body=body(value),
                config=root.config,
                errors=root.errors
            )
            response_objects.append(response)
        return sorted(response_objects, key=lambda x: x.code) or None
    def wrap(key, data):
        # Assemble one TraitNode from the current trait's data.
        return TraitNode(
            name=key,
            raw=data,
            root=root,
            query_params=query_params(),
            uri_params=uri_params(),
            form_params=form_params(),
            base_uri_params=base_uri_params(),
            headers=headers(data),
            body=body(data),
            responses=responses(),
            desc=description(),
            media_type=_get(data, "mediaType"),
            usage=_get(data, "usage"),
            protocols=protocols(),
            errors=root.errors
        )
    traits = _get(raml_data, "traits", [])
    trait_objects = []
    for trait in traits:
        # Each list entry is a single-key mapping: {name: trait data}.
        name = list(iterkeys(trait))[0]
        data = list(itervalues(trait))[0]
        trait_objects.append(wrap(name, data))
    return trait_objects or None
def create_resource_types(raml_data, root):
    """
    Parse resourceTypes into ``ResourceTypeNode`` objects.

    :param dict raml_data: Raw RAML data
    :param RootNode root: Root Node

    :returns: list of :py:class:`.raml.ResourceTypeNode` objects
    """
    # TODO: move this outside somewhere - config?
    accepted_methods = _get(root.config, "http_optional")
    #####
    # Set ResourceTypeNode attributes
    #####
    # NOTE(review): the helper closures below read ``v`` (the resource type
    # definition dict) and ``meth`` (the current HTTP method string) from the
    # loops at the bottom of this function via late binding — they are only
    # safe to call while those loop variables are bound.
    def headers(data):
        # Merge headers inherited from a parent resource type, if any.
        _headers = _get(data, "headers", {})
        if _get(v, "type"):
            _headers = _get_inherited_item(_headers, "headers",
                                           resource_types,
                                           meth, v)
        header_objs = _create_base_param_obj(_headers,
                                             Header,
                                             root.config,
                                             root.errors)
        if header_objs:
            for h in header_objs:
                h.method = method(meth)
        return header_objs
    def body(data):
        # Merge body definitions inherited from a parent resource type.
        _body = _get(data, "body", default={})
        if _get(v, "type"):
            _body = _get_inherited_item(_body, "body", resource_types,
                                        meth, v)
        body_objects = []
        for key, value in list(iteritems(_body)):
            body = Body(
                mime_type=key,
                raw=value,
                schema=load_schema(_get(value, "schema")),
                example=load_schema(_get(value, "example")),
                form_params=_get(value, "formParameters"),
                config=root.config,
                errors=root.errors
            )
            body_objects.append(body)
        return body_objects or None
    def responses(data):
        response_objects = []
        _responses = _get(data, "responses", {})
        if _get(v, "type"):
            _responses = _get_inherited_item(_responses, "responses",
                                             resource_types, meth, v)
        for key, value in list(iteritems(_responses)):
            # NOTE(review): response headers are taken from the *own*
            # (non-inherited) "responses" mapping of ``data``, not from the
            # merged ``_responses`` — confirm this asymmetry is intended.
            _headers = _get(_get(data, "responses", {}), key, {})
            _headers = _get(_headers, "headers", {})
            header_objs = _create_base_param_obj(_headers, Header,
                                                 root.config, root.errors)
            if header_objs:
                for h in header_objs:
                    h.method = method(meth)
            response = Response(
                code=key,
                raw={key: value},
                desc=_get(value, "description"),
                headers=header_objs,
                body=body(value),
                config=root.config,
                method=method(meth),
                errors=root.errors
            )
            response_objects.append(response)
        if response_objects:
            # Sort by status code for deterministic ordering.
            return sorted(response_objects, key=lambda x: x.code)
        return None
    def uri_params(data):
        uri_params = _get_attribute_dict(data, "uriParameters", v)
        if _get(v, "type"):
            uri_params = _get_inherited_type_params(v, "uriParameters",
                                                    uri_params, resource_types)
        return _create_base_param_obj(uri_params,
                                      URIParameter,
                                      root.config,
                                      root.errors)
    def base_uri_params(data):
        uri_params = _get_attribute_dict(data, "baseUriParameters", v)
        return _create_base_param_obj(uri_params,
                                      URIParameter,
                                      root.config,
                                      root.errors)
    def query_params(data):
        query_params = _get_attribute_dict(data, "queryParameters", v)
        if _get(v, "type"):
            query_params = _get_inherited_type_params(v, "queryParameters",
                                                      query_params,
                                                      resource_types)
        return _create_base_param_obj(query_params,
                                      QueryParameter,
                                      root.config,
                                      root.errors)
    def form_params(data):
        form_params = _get_attribute_dict(data, "formParameters", v)
        if _get(v, "type"):
            form_params = _get_inherited_type_params(v, "formParameters",
                                                     form_params,
                                                     resource_types)
        return _create_base_param_obj(form_params,
                                      FormParameter,
                                      root.config,
                                      root.errors)
    def description():
        # prefer the resourceType method description
        if meth:
            method_attr = _get(v, meth)
            desc = _get(method_attr, "description")
            return desc or _get(v, "description")
        return _get(v, "description")
    def type_():
        return _get(v, "type")
    def method(meth):
        # Strip the RAML "optional method" marker, e.g. "get?" -> "get".
        if not meth:
            return None
        if "?" in meth:
            return meth[:-1]
        return meth
    def optional():
        # Returns None (not False) when there is no method at all.
        if meth:
            return "?" in meth
    def protocols(data):
        m, r = _get_res_type_attribute(v, data, "protocols", None)
        return m or r or root.protocols
    def is_(data):
        m, r = _get_res_type_attribute(v, data, "is", default=[])
        return m + r or None
    def traits(data):
        # Resolve assigned trait names into parsed TraitNode objects.
        assigned = is_(data)
        if assigned:
            if root.traits:
                trait_objs = []
                for trait in assigned:
                    obj = [t for t in root.traits if t.name == trait]
                    if obj:
                        trait_objs.append(obj[0])
                return trait_objs or None
    def secured_by(data):
        m, r = _get_res_type_attribute(v, data, "securedBy", [])
        return m + r or None
    def security_schemes_(data):
        secured = secured_by(data)
        return security_schemes(secured, root)
    def wrap(key, data, meth, _v):
        # NOTE(review): the ``meth`` parameter shadows the closure variable;
        # ``_v`` is unused — the helpers above read the outer loop's ``v``.
        return ResourceTypeNode(
            name=key,
            raw=data,
            root=root,
            headers=headers(data),
            body=body(data),
            responses=responses(data),
            uri_params=uri_params(data),
            base_uri_params=base_uri_params(data),
            query_params=query_params(data),
            form_params=form_params(data),
            media_type=_get(v, "mediaType"),
            desc=description(),
            type=type_(),
            method=method(meth),
            usage=_get(v, "usage"),
            optional=optional(),
            is_=is_(data),
            traits=traits(data),
            secured_by=secured_by(data),
            security_schemes=security_schemes_(data),
            display_name=_get(data, "displayName", key),
            protocols=protocols(data),
            errors=root.errors
        )
    resource_types = _get(raml_data, "resourceTypes", [])
    resource_type_objects = []
    child_res_type_objects = []
    child_res_type_names = []
    # First pass: parse stand-alone resource types; defer inheriting ones.
    for res in resource_types:
        for k, v in list(iteritems(res)):
            if isinstance(v, dict):
                if "type" in list(iterkeys(v)):
                    # Inheriting types are resolved in the second pass below.
                    child_res_type_objects.append({k: v})
                    child_res_type_names.append(k)
                else:
                    for meth in list(iterkeys(v)):
                        if meth in accepted_methods:
                            method_data = _get(v, meth, {})
                            resource = wrap(k, method_data, meth, v)
                            resource_type_objects.append(resource)
            else:
                meth = None
                resource = wrap(k, {}, meth, v)
                resource_type_objects.append(resource)
    # Second pass: merge each inheriting type with its parent's data.
    while child_res_type_objects:
        child = child_res_type_objects.pop()
        name = list(iterkeys(child))[0]
        data = list(itervalues(child))[0]
        parent = data.get("type")
        # NOTE(review): a child whose parent is itself an inheriting type is
        # dropped here (popped but never re-queued) — confirm intended.
        if parent in child_res_type_names:
            continue
        p_data = [r for r in resource_types if list(iterkeys(r))[0] == parent]
        p_data = p_data[0].get(parent)
        res_data = _get_data_union(data, p_data)
        for meth in list(iterkeys(res_data)):
            if meth in accepted_methods:
                method_data = _get(res_data, meth, {})
                comb_data = dict(list(iteritems(method_data)) +
                                 list(iteritems(res_data)))
                resource = ResourceTypeNode(
                    name=name,
                    raw=res_data,
                    root=root,
                    headers=headers(method_data),
                    body=body(method_data),
                    responses=responses(method_data),
                    uri_params=uri_params(comb_data),
                    base_uri_params=base_uri_params(comb_data),
                    query_params=query_params(method_data),
                    form_params=form_params(method_data),
                    # NOTE(review): ``v`` is stale here — it is left over from
                    # the first-pass loop above; this looks like it should read
                    # from ``res_data``. Confirm before changing.
                    media_type=_get(v, "mediaType"),
                    desc=description(),
                    type=_get(res_data, "type"),
                    method=method(meth),
                    usage=_get(res_data, "usage"),
                    optional=optional(),
                    is_=is_(res_data),
                    traits=traits(res_data),
                    secured_by=secured_by(res_data),
                    security_schemes=security_schemes_(res_data),
                    display_name=_get(method_data, "displayName", name),
                    protocols=protocols(res_data),
                    errors=root.errors
                )
                resource_type_objects.append(resource)
    return resource_type_objects or None
def create_resources(node, resources, root, parent):
    """
    Recursively traverses the RAML file via DFS to find each resource
    endpoint.

    :param dict node: Dictionary of node to traverse
    :param list resources: List of collected ``ResourceNode`` s
    :param RootNode root: The ``RootNode`` of the API
    :param ResourceNode parent: Parent ``ResourceNode`` of current ``node``

    :returns: List of :py:class:`.raml.ResourceNode` objects.
    """
    for key, value in list(iteritems(node)):
        # Only keys starting with "/" denote resource endpoints.
        if not key.startswith("/"):
            continue
        avail = _get(root.config, "http_optional")
        child_keys = list(iterkeys(value))
        methods = [m for m in avail if m in child_keys]
        assigned = None
        if "type" in child_keys:
            # An assigned resource type may contribute its own
            # (non-optional) method to this endpoint.
            assigned = _resource_type_lookup(_get(value, "type"), root)
            if hasattr(assigned, "method") and not assigned.optional:
                methods.append(assigned.method)
                methods = list(set(methods))
        if methods:
            # Create one node per supported HTTP method.
            for m in methods:
                child = create_node(name=key,
                                    raw_data=value,
                                    method=m,
                                    parent=parent,
                                    root=root)
                resources.append(child)
        elif "type" in child_keys:
            # No explicit methods: inherit the resource type's method,
            # falling back to None when the type declares none.
            child = create_node(name=key,
                                raw_data=value,
                                method=getattr(assigned, "method", None),
                                parent=parent,
                                root=root)
            resources.append(child)
        else:
            child = create_node(name=key,
                                raw_data=value,
                                method=None,
                                parent=parent,
                                root=root)
            resources.append(child)
        # Depth-first descent into the last-created child's raw data.
        resources = create_resources(child.raw, resources, root, child)
    return resources
def create_node(name, raw_data, method, parent, root):
    """
    Create a Resource Node object.

    :param str name: Name of resource node
    :param dict raw_data: Raw RAML data associated with resource node
    :param str method: HTTP method associated with resource node
    :param ResourceNode parent: Parent node object of resource node, if any
    :param RootNode root: API ``RootNode`` that the resource node is attached to

    :returns: :py:class:`.raml.ResourceNode` object
    """
    #####
    # Node attribute functions
    #####
    def path():
        """Set resource's relative URI path."""
        parent_path = ""
        if parent:
            parent_path = parent.path
        return parent_path + name
    def absolute_uri():
        """Set resource's absolute URI path."""
        uri = root.base_uri + path()
        proto = protocols()
        if proto:
            uri = uri.split("://")
            if len(uri) == 2:
                uri = uri[1]
            if root.protocols:
                _proto = list(set(root.protocols) & set(proto))
                # if resource protocols and root protocols share a protocol
                # then use that one
                if _proto:
                    uri = _proto[0].lower() + "://" + uri
                # if no shared protocols, use the first of the resource
                # protocols
                else:
                    uri = proto[0].lower() + "://" + uri
        return uri
    def protocols():
        """Set resource's supported protocols."""
        kwargs = dict(root=root,
                      is_=is_(),
                      type_=type_(),
                      method=method,
                      data=raw_data,
                      parent=parent)
        objects_to_inherit = [
            "traits", "types", "method", "resource", "parent"
        ]
        inherited = get_inherited("protocols", objects_to_inherit, **kwargs)
        trait = inherited["traits"]
        r_type = inherited["types"]
        meth = inherited["method"]
        res = inherited["resource"]
        parent_ = inherited["parent"]
        # Fall back to the scheme of the base URI when nothing declares one.
        default = [root.base_uri.split("://")[0].upper()]
        return meth or r_type or trait or res or parent_ or default
    def headers():
        """Set resource's supported headers."""
        headers = _get_attribute("headers", method, raw_data)
        header_objs = _get_inherited_attribute("headers", root, type_(),
                                               method, is_())
        _headers = _create_base_param_obj(headers,
                                          Header,
                                          root.config,
                                          root.errors,
                                          method=method)
        if _headers is None:
            return header_objs or None
        # Own headers win over inherited duplicates.
        return _remove_duplicates(header_objs, _headers)
    def body():
        """Set resource's supported request/response body."""
        bodies = _get_attribute("body", method, raw_data)
        body_objects = _get_inherited_attribute("body", root, type_(),
                                                method, is_())
        _body_objs = []
        for k, v in list(iteritems(bodies)):
            if v is None:
                continue
            body = Body(
                mime_type=k,
                raw={k: v},
                schema=load_schema(_get(v, "schema")),
                example=load_schema(_get(v, "example")),
                form_params=_get(v, "formParameters"),
                config=root.config,
                errors=root.errors
            )
            _body_objs.append(body)
        if _body_objs == []:
            return body_objects or None
        return _remove_duplicates(body_objects, _body_objs)
    def responses():
        """Set resource's expected responses."""
        def resp_headers(headers):
            """Set response headers."""
            header_objs = []
            for k, v in list(iteritems(headers)):
                header = Header(
                    name=k,
                    display_name=_get(v, "displayName", default=k),
                    method=method,
                    raw=headers,
                    type=_get(v, "type", default="string"),
                    desc=_get(v, "description"),
                    example=_get(v, "example"),
                    default=_get(v, "default"),
                    minimum=_get(v, "minimum"),
                    maximum=_get(v, "maximum"),
                    min_length=_get(v, "minLength"),
                    max_length=_get(v, "maxLength"),
                    enum=_get(v, "enum"),
                    repeat=_get(v, "repeat", default=False),
                    pattern=_get(v, "pattern"),
                    config=root.config,
                    errors=root.errors
                )
                header_objs.append(header)
            return header_objs or None
        def resp_body(body):
            """Set response body."""
            body_list = []
            default_body = {}
            for (key, spec) in body.items():
                if key not in MEDIA_TYPES:
                    # if a root mediaType was defined, the response body
                    # may omit the mime_type definition
                    if key in ('schema', 'example'):
                        default_body[key] = load_schema(spec) if spec else {}
                else:
                    mime_type = key
                    # spec might be '!!null'
                    raw = spec or body
                    _schema = {}
                    _example = {}
                    if spec:
                        _schema_spec = _get(spec, 'schema', '')
                        _example_spec = _get(spec, 'example', '')
                        if _schema_spec:
                            _schema = load_schema(_schema_spec)
                        if _example_spec:
                            _example = load_schema(_example_spec)
                    body_list.append(Body(
                        mime_type=mime_type,
                        raw=raw,
                        schema=_schema,
                        example=_example,
                        form_params=None,
                        config=root.config,
                        errors=root.errors
                    ))
            if default_body:
                body_list.append(Body(
                    mime_type=root.media_type,
                    raw=body,
                    schema=_get(default_body, 'schema'),
                    example=_get(default_body, 'example'),
                    form_params=None,
                    config=root.config,
                    errors=root.errors
                ))
            return body_list or None
        resps = _get_attribute("responses", method, raw_data)
        type_resp = _get_resource_type("responses", root, type_(), method)
        trait_resp = _get_trait("responses", root, is_())
        resp_objs = type_resp + trait_resp
        resp_codes = [r.code for r in resp_objs]
        for k, v in list(iteritems(resps)):
            if k in resp_codes:
                # Merge own response data into the inherited response object.
                resp = [r for r in resp_objs if r.code == k][0]
                index = resp_objs.index(resp)
                inherit_resp = resp_objs.pop(index)
                headers = resp_headers(_get(v, "headers", default={}))
                if inherit_resp.headers:
                    headers = _remove_duplicates(inherit_resp.headers, headers)
                body = resp_body(_get(v, "body", {}))
                if inherit_resp.body:
                    body = _remove_duplicates(inherit_resp.body, body)
                resp = Response(
                    code=k,
                    raw={k: v},  # TODO: should probably be a data union
                    method=method,
                    desc=_get(v, "description") or inherit_resp.desc,
                    headers=headers,
                    body=body,
                    config=root.config,
                    errors=root.errors
                )
                resp_objs.insert(index, resp)  # preserve order
            else:
                _headers = _get(v, "headers", default={})
                _body = _get(v, "body", default={})
                resp = Response(
                    code=k,
                    raw={k: v},
                    method=method,
                    desc=_get(v, "description"),
                    headers=resp_headers(_headers),
                    body=resp_body(_body),
                    config=root.config,
                    errors=root.errors
                )
                resp_objs.append(resp)
        return resp_objs or None
    def uri_params():
        """Set resource's URI parameters."""
        unparsed_attr = "uriParameters"
        parsed_attr = "uri_params"
        root_params = root.uri_params
        params = _create_uri_params(unparsed_attr, parsed_attr, root_params,
                                    root, type_(), is_(), method, raw_data,
                                    parent)
        declared = []
        base = base_uri_params()
        if base:
            declared = [p.name for p in base]
        return _preserve_uri_order(absolute_uri(), params, root.config,
                                   root.errors, declared)
    def base_uri_params():
        """Set resource's base URI parameters."""
        root_params = root.base_uri_params
        kw = dict(type=type_(), is_=is_(), root_params=root_params)
        params = set_params(raw_data, "base_uri_params", root, method,
                            inherit=True, **kw)
        declared = []
        uri = root.uri_params
        base = root.base_uri_params
        if uri:
            declared = [p.name for p in uri]
        if base:
            declared.extend([p.name for p in base])
        return _preserve_uri_order(root.base_uri, params, root.config,
                                   root.errors, declared)
    def query_params():
        """Set resource's query parameters."""
        kw = dict(type_=type_(), is_=is_())
        return set_params(raw_data, "query_params", root, method,
                          inherit=True, **kw)
    def form_params():
        """Set resource's form parameters."""
        kw = dict(type_=type_(), is_=is_())
        return set_params(raw_data, "form_params", root, method,
                          inherit=True, **kw)
    def media_type_():
        """Set resource's supported media types."""
        if method is None:
            return None
        kwargs = dict(root=root,
                      is_=is_(),
                      type_=type_(),
                      method=method,
                      data=raw_data)
        objects_to_inherit = [
            "method", "traits", "types", "resource", "root"
        ]
        inherited = get_inherited("mediaType", objects_to_inherit, **kwargs)
        meth = inherited.get("method")
        # Fix: the inheritance map is keyed "traits" (plural) — see
        # protocols() above; ``.get("trait")`` always returned None, so a
        # trait-level mediaType was silently ignored.
        trait = inherited.get("traits")
        r_type = inherited.get("types")
        res = inherited.get("resource")
        root_ = inherited.get("root")
        return meth or trait or r_type or res or root_
    def description():
        """Set resource's description."""
        desc = _get(raw_data, "description")
        try:
            desc = _get(_get(raw_data, method), "description")
            if desc is None:
                raise AttributeError
        except AttributeError:
            # Fall back to the assigned resource type's description when the
            # method-level one is absent.
            if type_():
                assigned = _resource_type_lookup(type_(), root)
                try:
                    if assigned.method == method:
                        desc = assigned.description.raw
                except AttributeError:
                    pass
            else:
                desc = _get(raw_data, "description")
        return desc
    def is_():
        """Set resource's assigned trait names."""
        is_list = []
        res_level = _get(raw_data, "is")
        if res_level:
            assert isinstance(res_level, list), "Error parsing trait"
            is_list.extend(res_level)
        method_level = _get(raw_data, method, {})
        if method_level:
            method_level = _get(method_level, "is")
            if method_level:
                assert isinstance(method_level, list), "Error parsing trait"
                is_list.extend(method_level)
        return is_list or None
    def traits():
        """Set resource's assigned trait objects."""
        assigned = is_()
        if assigned:
            if root.traits:
                trait_objs = []
                for trait in assigned:
                    obj = [t for t in root.traits if t.name == trait]
                    if obj:
                        trait_objs.append(obj[0])
                return trait_objs or None
    # TODO: simplify — method-level assignment wins over resource-level.
    def type_():
        """Set resource's assigned resource type name."""
        __get_method = _get(raw_data, method, {})
        assigned_type = _get(__get_method, "type")
        if assigned_type:
            if not isinstance(assigned_type, dict):
                return assigned_type
            return list(iterkeys(assigned_type))[0]  # NOCOV
        assigned_type = _get(raw_data, "type")
        if isinstance(assigned_type, dict):
            return list(iterkeys(assigned_type))[0]  # NOCOV
        return assigned_type
    def resource_type():
        """Set resource's assigned resource type objects."""
        if type_() and root.resource_types:
            assigned_name = type_()
            res_types = root.resource_types
            type_obj = [r for r in res_types if r.name == assigned_name]
            if type_obj:
                return type_obj[0]
    def secured_by():
        """
        Set resource's assigned security scheme names and related parameters.
        """
        # Precedence: method level, then resource level, then root level.
        if method is not None:
            method_level = _get(raw_data, method, {})
            if method_level:
                secured_by = _get(method_level, "securedBy")
                if secured_by:
                    return secured_by
        resource_level = _get(raw_data, "securedBy")
        if resource_level:
            return resource_level
        root_level = root.secured_by
        if root_level:
            return root_level
    def security_schemes_():
        """Set resource's assigned security scheme objects."""
        secured = secured_by()
        return security_schemes(secured, root)
    node = ResourceNode(
        name=name,
        raw=raw_data,
        method=method,
        parent=parent,
        root=root,
        display_name=_get(raw_data, "displayName", name),
        path=path(),
        absolute_uri=absolute_uri(),
        protocols=protocols(),
        headers=headers(),
        body=body(),
        responses=responses(),
        uri_params=uri_params(),
        base_uri_params=base_uri_params(),
        query_params=query_params(),
        form_params=form_params(),
        media_type=media_type_(),
        desc=description(),
        is_=is_(),
        traits=traits(),
        type=type_(),
        resource_type=resource_type(),
        secured_by=secured_by(),
        security_schemes=security_schemes_(),
        errors=root.errors
    )
    if resource_type():
        # correct inheritance (issue #23)
        node._inherit_type()
    return node
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for strided_slice operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.lite.testing.zip_test_utils import TF_TYPE_INFO
def _make_strided_slice_tests(options, test_parameters, expected_tf_failures=0):
  """Utility function to make strided_slice_tests based on parameters."""

  def build_graph(parameters):
    """Build graph for stride_slice test."""
    input_tensor = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["constant_indices"]:
      # Indices baked into the graph as constants: only the input is fed.
      begin = parameters["begin"]
      end = parameters["end"]
      strides = parameters["strides"]
      tensors = [input_tensor]
    else:
      # Indices are fed at run time through placeholders.
      begin = tf.compat.v1.placeholder(
          dtype=parameters["index_type"],
          name="begin",
          shape=[len(parameters["begin"])])
      end = tf.compat.v1.placeholder(
          dtype=parameters["index_type"],
          name="end",
          shape=[len(parameters["end"])])
      strides = None
      if parameters["strides"] is not None:
        strides = tf.compat.v1.placeholder(
            dtype=parameters["index_type"],
            name="strides",
            shape=[len(parameters["strides"])])
      tensors = [input_tensor, begin, end]
      if strides is not None:
        tensors.append(strides)
    # NOTE(review): "shrink_axis_mask" from test_parameters is never
    # forwarded to tf.strided_slice — confirm whether this is intentional.
    out = tf.strided_slice(
        input_tensor,
        begin,
        end,
        strides,
        begin_mask=parameters["begin_mask"],
        end_mask=parameters["end_mask"])
    return tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build inputs for stride_slice test."""
    input_values = create_tensor_data(
        parameters["dtype"],
        parameters["input_shape"],
        min_value=-1,
        max_value=1)
    index_type = TF_TYPE_INFO[parameters["index_type"]][0]
    values = [input_values]
    if not parameters["constant_indices"]:
      # Feed order must match the placeholder order in build_graph.
      begin_values = np.array(parameters["begin"]).astype(index_type)
      end_values = np.array(parameters["end"]).astype(index_type)
      stride_values = (
          np.array(parameters["strides"]).astype(index_type)
          if parameters["strides"] is not None else None)
      values.append(begin_values)
      values.append(end_values)
      if stride_values is not None:
        values.append(stride_values)
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_strided_slice_tests(options):
  """Make a set of tests to do strided_slice."""
  # TODO(soroosh): add test/support for uint8.
  test_parameters = [
      # 4-D (basic cases with const/non-const indices).
      {
          "dtype": [tf.float32, tf.int32, tf.int64, tf.bool],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin": [[0, 0, 0, 0]],
          "end": [[12, 2, 2, 5]],
          "begin_mask": [None],
          "end_mask": [None],
          "shrink_axis_mask": [None],
          "constant_indices": [False, True],
          "fully_quantize": [False],
      },
      # 4-D with non-trivial begin & end.
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
          "end": [[8, 2, 2, 3], [12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin_mask": [None, 8],
          "end_mask": [None, 3],
          "shrink_axis_mask": [None, 15, -1],
          "constant_indices": [True],
          "fully_quantize": [False],
      },
      # Begin, end, strides dim are different from input shape
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0]],
          "end": [[1]],
          "strides": [None, [1]],
          "begin_mask": [0],
          "end_mask": [0],
          "shrink_axis_mask": [1],
          "constant_indices": [True, False],
          "fully_quantize": [False],
      },
      # 2-D
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, 0]],
          "end": [[2, 2]],
          "strides": [None, [2, 2]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False, True],
          "fully_quantize": [False],
      },
      # Negative strides
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, -1]],
          "end": [[2, -3]],
          "strides": [[1, -1]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False],
          "fully_quantize": [False],
      },
      # 4-D (cases with const indices and batchsize of 1).
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[1, 2, 2, 5]],
          "strides": [None, [1, 1, 1, 1]],
          "begin": [[0, 0, 0, 0], [0, 1, 1, 3]],
          "end": [[1, 2, 2, 5], [1, 2, 2, 4]],
          "begin_mask": [None],
          "end_mask": [None],
          "shrink_axis_mask": [None],
          "constant_indices": [True],
          "fully_quantize": [True],
      },
      # Begin, end, strides dim are different from input shape
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0]],
          "end": [[1]],
          "strides": [None, [1]],
          "begin_mask": [0],
          "end_mask": [0],
          "shrink_axis_mask": [1],
          "constant_indices": [True],
          "fully_quantize": [True],
      },
  ]
  # Extra cases that only the experimental (MLIR) converter handles.
  if options.use_experimental_converter:
    test_parameters = test_parameters + [
        # Begin equal to input dim.
        {
            "dtype": [tf.float32],
            "index_type": [tf.int32],
            "input_shape": [[1, 1, 2]],
            "begin": [[1]],
            "end": [[0]],
            "strides": [[1]],
            "begin_mask": [0],
            "end_mask": [1],
            "shrink_axis_mask": [0],
            "constant_indices": [True, False],
            "fully_quantize": [False],
        },
        {
            "dtype": [tf.float32],
            "index_type": [tf.int32],
            "input_shape": [[1, 1, 2]],
            "begin": [[1, 0, 0]],
            "end": [[0, -1, -1]],
            "strides": [[1, 1, 1]],
            "begin_mask": [6],
            "end_mask": [7],
            "shrink_axis_mask": [0],
            "constant_indices": [True, False],
            "fully_quantize": [False],
        },
        # String input.
        {
            "dtype": [tf.string],
            "index_type": [tf.int32],
            "input_shape": [[12, 2, 2, 5]],
            "begin": [[0, 0, 0, 0]],
            "end": [[8, 2, 2, 3]],
            "strides": [[2, 1, 3, 1]],
            "begin_mask": [8],
            "end_mask": [3],
            "shrink_axis_mask": [None, -1],
            "constant_indices": [True, False],
            "fully_quantize": [False],
        }
    ]
  _make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)
@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
  """Make a set of exhaustive tests for 1D strided_slice."""
  test_parameters = [
      # 1-D Exhaustive
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[3]],
          "begin": [[-2], [-1], [0], [1], [2]],
          "end": [[-2], [-1], [0], [1], [2]],
          "strides": [[-2], [-1], [1], [2]],
          "begin_mask": [0, 1],
          "end_mask": [0, 1],
          "shrink_axis_mask": [0],
          "constant_indices": [False],
          # NOTE(review): unlike the configs above, "fully_quantize" is
          # omitted here — confirm the zip-test harness defaults it.
      },
  ]
  _make_strided_slice_tests(options, test_parameters)
| |
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
##########################################################################
# Name: test_layer3_ft_subinterface_route.py
#
# Objective: To verify that subinterface route are installed in asic. Ping
# should pass only if route is installed.
#
# Topology: 2 switches connected by 1 interface and 2 hosts connected
# by 1 interface
#
##########################################################################
"""
OpenSwitch Tests for subinterface route test using hosts
"""
from pytest import mark
# Parsed by the topology framework: lines starting with '#' inside the string
# are diagram comments; the "Nodes" and "Links" entries are functional.
TOPOLOGY = """
# +-------+ +-------+ +-------+
# | | +-------+ +-------+ | | | |
# | hs1 <-----> sw1 <-----> sw2 <-----> hs2 | | hs3 |
# | | +-------+ +-------+ | | +--------
# +-------+ | +-------+ |
# |-----------------------|
# Nodes
[type=openswitch name="Switch 1"] sw1
[type=openswitch name="Switch 2"] sw2
[type=host name="Host 1"] hs1
[type=host name="Host 2"] hs2
[type=host name="Host 3"] hs3
# Links
hs1:1 -- sw1:1
sw1:2 -- sw2:2
sw2:4 -- hs2:1
sw2:3 -- hs3:1
"""
def configure_subinterface(sw, interface, ip_addr, vlan):
    """Create a dot1Q subinterface on *sw* and assign it an IP address."""
    with sw.libs.vtysh.ConfigSubinterface(interface, vlan) as cfg:
        cfg.no_shutdown()
        cfg.ip_address(ip_addr)
        cfg.encapsulation_dot1_q(vlan)
def turn_on_interface(sw, interface):
    """Administratively enable *interface* on switch *sw*."""
    with sw.libs.vtysh.ConfigInterface(interface) as cfg:
        cfg.no_shutdown()
def check_route(buf, network, nexthop, cli):
    """Return True when a route to ``network`` via ``nexthop`` is present.

    :param buf: parsed ``show ip route`` output (list of dicts) when ``cli``
        is True; otherwise the raw text of the bash ``route`` command.
    :param str network: destination network prefix to look for.
    :param str nexthop: expected next-hop address.
    :param bool cli: selects the structured (vtysh) or textual (bash) check.
    """
    if cli:
        # Structured vtysh output: match on route id and first next hop.
        # Use .get() so entries without next hops (e.g. directly connected
        # routes) are skipped instead of raising KeyError/IndexError.
        for item in buf:
            hops = item.get('next_hops') or []
            if item.get('id') == network and hops \
                    and hops[0].get('via') == nexthop:
                return True
        return False
    # Plain-text output: a simple substring check for both tokens.
    return network in buf and nexthop in buf
def configure_l2_interface(sw, interface, vlan):
    """Turn *interface* on *sw* into an L2 access port on *vlan*."""
    with sw.libs.vtysh.ConfigInterface(interface) as cfg:
        cfg.no_shutdown()
        cfg.no_routing()
        cfg.vlan_access(vlan)
@mark.gate
@mark.platform_incompatible(['docker'])
def test_subinterface_route(topology):
    """Verify that subinterface routes are installed in the ASIC.

    Topology:

        [h1] <-----> [s1] <-----> [s2] <-----> [h2]

    Cases:
    - Ping between h1 and h2 succeeds once static routes via the
      subinterfaces are installed.
    - Ping between h2 and h3 fails after sw2's port to h3 is reconfigured
      as an L2 access port on the subinterface VLAN.
    """
    sw1 = topology.get('sw1')
    sw2 = topology.get('sw2')
    hs1 = topology.get('hs1')
    hs2 = topology.get('hs2')
    hs3 = topology.get('hs3')
    assert sw1 is not None
    assert sw2 is not None
    assert hs1 is not None
    assert hs2 is not None
    assert hs3 is not None
    sw1p1 = sw1.ports["1"]
    sw1p2 = sw1.ports["2"]
    sw2p2 = sw2.ports["2"]
    sw2p3 = sw2.ports["3"]
    sw2p4 = sw2.ports["4"]
    subinterface_vlan = '10'
    sw1_subinterface_ip = '2.2.2.2'
    sw2_subinterface_ip = '2.2.2.1'
    h1_ip_address = '1.1.1.2'
    h2_ip_address = '3.3.3.3'
    h3_ip_address = '3.3.3.4'
    sw1_l3_ip_address = '1.1.1.1'
    sw2_l3_ip_address = '3.3.3.1'
    mask = '/24'
    print("Create subinterface in both switches")
    configure_subinterface(sw1, sw1p2,
                           sw1_subinterface_ip + mask,
                           subinterface_vlan)
    configure_subinterface(sw2, sw2p2,
                           sw2_subinterface_ip + mask,
                           subinterface_vlan)
    print("Turning on all interfaces used in this test")
    turn_on_interface(sw1, sw1p1)
    turn_on_interface(sw1, sw1p2)
    turn_on_interface(sw2, sw2p2)
    turn_on_interface(sw2, sw2p4)
    print("Configure IP and bring UP in host 1")
    hs1.libs.ip.interface('1', addr=h1_ip_address + mask, up=True)
    print("Adding routes on host 1")
    hs1.libs.ip.add_route('3.3.3.0/24', '1.1.1.1')
    print("Configure IP and bring UP in host 2")
    hs2.libs.ip.interface('1', addr=h2_ip_address + mask, up=True)
    print("Adding routes on host 2")
    hs2.libs.ip.add_route('1.1.1.0/24', '3.3.3.1')
    print("Configure IP and bring UP in host 3")
    hs3.libs.ip.interface('1', addr=h3_ip_address + mask, up=True)
    print("Configure L3 interface IP address on switch 1")
    with sw1.libs.vtysh.ConfigInterface(sw1p1) as ctx:
        ctx.ip_address(sw1_l3_ip_address + mask)
        ctx.no_shutdown()
    print("Adding routes on Switch 1")
    with sw1.libs.vtysh.Configure() as ctx:
        ctx.ip_route('3.3.3.0/24', '2.2.2.1')
    print("Configure IP address on switch 2")
    with sw2.libs.vtysh.ConfigInterface(sw2p4) as ctx:
        ctx.ip_address(sw2_l3_ip_address + mask)
        ctx.no_shutdown()
    print("Adding routes on Switch 2")
    with sw2.libs.vtysh.Configure() as ctx:
        ctx.ip_route('1.1.1.0/24', '2.2.2.2')
    print("Check routes")
    # Poll up to 3 times for the static routes to appear in both the vtysh
    # routing table and the kernel (swns namespace) table.
    pass_flag = 0
    attempts = 3
    while pass_flag == 0 and attempts > 0:
        switch1_routes = sw1.libs.vtysh.show_ip_route()
        switch2_routes = sw2.libs.vtysh.show_ip_route()
        sw1_route = sw1("ip netns exec swns route", shell='bash')
        sw2_route = sw2("ip netns exec swns route", shell='bash')
        if check_route(switch1_routes, '3.3.3.0', '2.2.2.1', True) and\
                check_route(switch2_routes, '1.1.1.0', '2.2.2.2', True) and\
                check_route(sw1_route, '3.3.3.0', '2.2.2.1', False) and\
                check_route(sw2_route, '1.1.1.0', '2.2.2.2', False):
            pass_flag = 1
        elif check_route(switch1_routes, '1.1.1.0', '2.2.2.2', True) and\
                check_route(switch2_routes, '3.3.3.0', '2.2.2.1', True) and\
                check_route(sw1_route, '1.1.1.0', '2.2.2.2', False) and\
                check_route(sw2_route, '3.3.3.0', '2.2.2.1', False):
            pass_flag = 1
        else:
            pass_flag = 0
        attempts -= 1
    assert pass_flag == 1, "Routes not configured"
    print("Ping h1 to host 2")
    pass_flag = 0
    ping_num = 10
    ping = hs1.libs.ping.ping(ping_num, h2_ip_address)
    if ping['transmitted'] == ping['received'] == ping_num:
        pass_flag = 1
    if pass_flag == 0:
        # Dump state to aid debugging before failing the assertion below.
        sw1.libs.vtysh.show_running_config()
        sw1.libs.vtysh.show_ip_route()
        sw1.libs.vtysh.show_interface(sw1p1)
        sw1.libs.vtysh.show_interface(sw1p2)
        sw2.libs.vtysh.show_running_config()
        sw2.libs.vtysh.show_ip_route()
        sw2.libs.vtysh.show_interface(sw2p2)
        sw2.libs.vtysh.show_interface(sw2p4)
    assert ping['transmitted'] == ping['received'],\
        'Ping between ' + h1_ip_address + ' and ' + h2_ip_address + ' failed'
    turn_on_interface(sw2, sw2p3)
    print("Configure Vlan 10")
    with sw2.libs.vtysh.ConfigVlan(subinterface_vlan) as ctx:
        ctx.no_shutdown()
    print("Configure l2 interface with vlan " + subinterface_vlan)
    configure_l2_interface(sw2, sw2p3, subinterface_vlan)
    print("Ping h2 to host 3")
    ping = hs2.libs.ping.ping(10, h3_ip_address)
    # Fix: the failure message previously named h1/h2, but this ping is
    # between h2 and h3.
    assert ping['received'] == 0,\
        'Ping between ' + h2_ip_address + ' and ' + h3_ip_address + ' passed'
| |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import unittest
from collections import defaultdict
from typing import Dict, List
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (
ExecutionFailure,
ExecutionGraph,
Job,
JobExistsError,
NoRootJobError,
UnknownJobError,
)
class ImmediatelyExecutingPool:
    """Stand-in worker pool that runs submitted work synchronously."""

    num_workers = 1

    def submit_async_work(self, work):
        """Execute the first unit of *work* immediately on the calling thread."""
        args = work.args_tuples[0]
        work.func(*args)
class PrintLogger:
    """Minimal logger that writes every message straight to stdout."""

    def error(self, msg):
        print(msg)

    def debug(self, msg):
        print(msg)
class CapturingLogger:
    """Logger that records messages in memory for later inspection."""

    # NOTE(review): class-level mutable default — these entries are shared
    # across ALL CapturingLogger instances (and across tests). Presumably
    # intentional here; confirm before reusing this class elsewhere.
    log_entries: Dict[str, List[str]] = defaultdict(list)

    def error(self, msg: str) -> None:
        self.log_entries["error"].append(msg)

    def debug(self, msg: str) -> None:
        self.log_entries["debug"].append(msg)
def passing_fn():
    """Do nothing; used as a job body that always succeeds."""
def raising_fn():
    """Raise a plain ``Exception``; used as a job body that always fails."""
    raise Exception("I'm an error")
def base_error_raising_fn():
    """Raise a ``BaseException`` (not an ``Exception`` subclass)."""
    raise BaseException("I'm a BaseException")
def raising_wrapper():
    """Call ``raising_fn``, adding one stack frame before the raise."""
    raising_fn()
class ExecutionGraphTest(unittest.TestCase):
    """Tests for ExecutionGraph: scheduling order, failure propagation,
    callback errors, graph validation, and size-based job priorities."""
    def setUp(self):
        # Names of jobs that actually executed, in execution order.
        self.jobs_run = []
    def execute(self, exec_graph):
        # Run the graph synchronously on a single-worker inline pool.
        exec_graph.execute(ImmediatelyExecutingPool(), PrintLogger())
    def job(self, name, fn, dependencies, size=0, on_success=None, on_failure=None):
        """Build a Job whose body records *name* in self.jobs_run before calling *fn*."""
        def recording_fn():
            self.jobs_run.append(name)
            fn()
        return Job(name, recording_fn, dependencies, size, on_success, on_failure)
    def test_single_job(self):
        exec_graph = ExecutionGraph([self.job("A", passing_fn, [])], False)
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A"])
    def test_single_dependency(self):
        exec_graph = ExecutionGraph(
            [self.job("A", passing_fn, ["B"]), self.job("B", passing_fn, [])], False
        )
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["B", "A"])
    def test_simple_binary_tree(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, ["B", "C"]),
                self.job("B", passing_fn, []),
                self.job("C", passing_fn, []),
            ],
            False,
        )
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["B", "C", "A"])
    def test_simple_linear_dependencies(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, ["B"]),
                self.job("B", passing_fn, ["C"]),
                self.job("C", passing_fn, []),
            ],
            False,
        )
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["C", "B", "A"])
    def test_simple_unconnected(self):
        exec_graph = ExecutionGraph(
            [self.job("A", passing_fn, []), self.job("B", passing_fn, []),], False
        )
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "B"])
    def test_simple_unconnected_tree(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, ["B"]),
                self.job("B", passing_fn, []),
                self.job("C", passing_fn, []),
            ],
            False,
        )
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["B", "C", "A"])
    def test_dependee_depends_on_dependency_of_its_dependency(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, ["B", "C"]),
                self.job("B", passing_fn, ["C"]),
                self.job("C", passing_fn, []),
            ],
            False,
        )
        self.execute(exec_graph)
        self.assertEqual(["C", "B", "A"], self.jobs_run)
    def test_one_failure_raises_exception(self):
        exec_graph = ExecutionGraph([self.job("A", raising_fn, [])], False)
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        self.assertEqual("Failed jobs: A", str(cm.exception))
    def test_base_exception_failure_raises_exception(self):
        # BaseException happens for lower level issues, not catching and propagating it makes debugging
        # difficult.
        exec_graph = ExecutionGraph([self.job("A", base_error_raising_fn, [])], False)
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        self.assertEqual("Failed jobs: A", str(cm.exception))
    def test_failure_of_dependency_does_not_run_dependents(self):
        exec_graph = ExecutionGraph(
            [self.job("A", passing_fn, ["F"]), self.job("F", raising_fn, [])], False
        )
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        self.assertEqual(["F"], self.jobs_run)
        self.assertEqual("Failed jobs: F", str(cm.exception))
    def test_failure_of_dependency_does_not_run_second_order_dependents(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, ["B"]),
                self.job("B", passing_fn, ["F"]),
                self.job("F", raising_fn, []),
            ],
            False,
        )
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        self.assertEqual(["F"], self.jobs_run)
        self.assertEqual("Failed jobs: F", str(cm.exception))
    def test_failure_of_one_leg_of_tree_does_not_cancel_other(self):
        # TODO do we want this behavior, or do we want to fail fast on the first failed job?
        exec_graph = ExecutionGraph(
            [
                self.job("B", passing_fn, []),
                self.job("F", raising_fn, ["B"]),
                self.job("A", passing_fn, ["B"]),
            ],
            False,
        )
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        # Order of F and A after B is unspecified; accept either interleaving.
        self.assertTrue(self.jobs_run == ["B", "F", "A"] or self.jobs_run == ["B", "A", "F"])
        self.assertEqual("Failed jobs: F", str(cm.exception))
    def test_failure_of_disconnected_job_does_not_cancel_non_dependents(self):
        exec_graph = ExecutionGraph(
            [self.job("A", passing_fn, []), self.job("F", raising_fn, [])], False
        )
        with self.assertRaises(ExecutionFailure):
            self.execute(exec_graph)
        self.assertEqual(["A", "F"], self.jobs_run)
    def test_cycle_in_graph_causes_failure(self):
        with self.assertRaises(NoRootJobError) as cm:
            ExecutionGraph(
                [self.job("A", passing_fn, ["B"]), self.job("B", passing_fn, ["A"])], False
            )
        self.assertEqual(
            "Unexecutable graph: All scheduled jobs have dependencies. "
            "There must be a circular dependency.",
            str(cm.exception),
        )
    def test_non_existent_dependency_causes_failure(self):
        with self.assertRaises(UnknownJobError) as cm:
            ExecutionGraph([self.job("A", passing_fn, []), self.job("B", passing_fn, ["Z"])], False)
        self.assertEqual("Unexecutable graph: Undefined dependencies 'Z'", str(cm.exception))
    def test_on_success_callback_raises_error(self):
        exec_graph = ExecutionGraph([self.job("A", passing_fn, [], on_success=raising_fn)], False)
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        self.assertEqual("Error in on_success for A: I'm an error", str(cm.exception))
    def test_on_failure_callback_raises_error(self):
        exec_graph = ExecutionGraph([self.job("A", raising_fn, [], on_failure=raising_fn)], False)
        with self.assertRaises(ExecutionFailure) as cm:
            self.execute(exec_graph)
        self.assertEqual("Error in on_failure for A: I'm an error", str(cm.exception))
    def test_same_key_scheduled_twice_is_error(self):
        with self.assertRaises(JobExistsError) as cm:
            ExecutionGraph(
                [self.job("Same", passing_fn, []), self.job("Same", passing_fn, [])], False
            )
        self.assertEqual("Unexecutable graph: Job already scheduled 'Same'", str(cm.exception))
    # The priority tests below pin the exact values of the internal
    # _job_priority table: a job's priority is its size plus the maximum
    # priority among its dependents (the critical-path weight).
    def test_priorities_for_chain_of_jobs(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, [], 8),
                self.job("B", passing_fn, ["A"], 4),
                self.job("C", passing_fn, ["B"], 2),
                self.job("D", passing_fn, ["C"], 1),
            ],
            False,
        )
        self.assertEqual(exec_graph._job_priority, {"A": 15, "B": 7, "C": 3, "D": 1})
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
    def test_priorities_for_fork(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, [], 4),
                self.job("B", passing_fn, ["A"], 2),
                self.job("C", passing_fn, ["A"], 1),
            ],
            False,
        )
        self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 2, "C": 1})
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "B", "C"])
    def test_priorities_for_mirrored_fork(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, [], 4),
                self.job("B", passing_fn, ["A"], 1),
                self.job("C", passing_fn, ["A"], 2),
            ],
            False,
        )
        self.assertEqual(exec_graph._job_priority, {"A": 6, "B": 1, "C": 2})
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "C", "B"])
    def test_priorities_for_diamond(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, [], 8),
                self.job("B", passing_fn, ["A"], 4),
                self.job("C", passing_fn, ["A"], 2),
                self.job("D", passing_fn, ["B", "C"], 1),
            ],
            False,
        )
        self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 5, "C": 3, "D": 1})
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "B", "C", "D"])
    def test_priorities_for_mirrored_diamond(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, [], 8),
                self.job("B", passing_fn, ["A"], 2),
                self.job("C", passing_fn, ["A"], 4),
                self.job("D", passing_fn, ["B", "C"], 1),
            ],
            False,
        )
        self.assertEqual(exec_graph._job_priority, {"A": 13, "B": 3, "C": 5, "D": 1})
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "C", "B", "D"])
    def test_priorities_for_skewed_diamond(self):
        exec_graph = ExecutionGraph(
            [
                self.job("A", passing_fn, [], 1),
                self.job("B", passing_fn, ["A"], 2),
                self.job("C", passing_fn, ["B"], 4),
                self.job("D", passing_fn, ["A"], 8),
                self.job("E", passing_fn, ["C", "D"], 16),
            ],
            False,
        )
        self.assertEqual(exec_graph._job_priority, {"A": 25, "B": 22, "C": 20, "D": 24, "E": 16})
        self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A", "D", "B", "C", "E"])
    def test_jobs_not_canceled_multiple_times(self):
        # Each on_failure callback should fire exactly once per job, even when
        # a job (e.g. C1/C2) is reachable from the failure via several paths.
        failures = list()
        def collect_failure(jobname):
            def fn():
                failures.append(jobname)
            return fn
        def my_job(name, result_fn, deps):
            return self.job(name, result_fn, deps, 1, on_failure=collect_failure(name))
        exec_graph = ExecutionGraph(
            [
                my_job("A", raising_fn, []),
                my_job("B1", passing_fn, ["A"]),
                my_job("B2", passing_fn, ["A"]),
                my_job("C1", passing_fn, ["B1", "B2"]),
                my_job("C2", passing_fn, ["B1", "B2"]),
                my_job("E", passing_fn, ["C2"]),
            ],
            False,
        )
        with self.assertRaises(ExecutionFailure):
            self.execute(exec_graph)
        self.assertEqual(self.jobs_run, ["A"])
        self.assertEqual(failures, ["A", "B1", "B2", "C1", "C2", "E"])
    def test_dumps_stack_trace(self):
        # Second constructor arg True enables traceback logging on failure.
        graph = ExecutionGraph([self.job("A", raising_wrapper, [])], True)
        capturing_logger = CapturingLogger()
        with self.assertRaises(ExecutionFailure):
            graph.execute(ImmediatelyExecutingPool(), capturing_logger)
        error_logs = capturing_logger.log_entries["error"]
        # NOTE(review): the assertion expects two error entries (summary line +
        # traceback) but the msg text says "one" -- message looks stale; confirm.
        self.assertEqual(2, len(error_logs), msg=f"Wanted one error log, got: {error_logs}")
        regex = re.compile("A failed: I'm an error.*")
        self.assertRegex(error_logs[0], regex)
        regex = re.compile(
            'Traceback:.*in raising_wrapper.*raise Exception\\("I\'m an error.*"\\)', re.DOTALL,
        )
        self.assertRegex(error_logs[1], regex)
| |
"""
solacehelper is a class to construct solace commands and sets of commands.
"""
import logging
from libsolace.SolaceAPI import SolaceAPI
try:
import simplejson as json
except ImportError:
import json
class SolaceProvision:
    """ Provision the CLIENT_PROFILE, VPN, ACL_PROFILE, QUEUES and USERS
    :type vpn_dict: dictionary
        eg: {'owner': u'SolaceTest', 'spool_size': u'4096', 'password': u'd0nt_u5se_thIs', 'name': u'dev_testvpn'}
    :type queue_dict: list
        eg: [
              {"exclusive": u"true", "type": "", "name": u"testqueue1", "queue_size": u"4096"},
              {"exclusive": u"false", "type": "", "name": u"testqueue2", "queue_size": u"4096"}
            ]
    :type environment: str
    :type client_profile: str
    :type users: list
    :type testmode: bool
    :type create_queues: bool
    :type shutdown_on_apply: bool
    :param vpn_dict: vpn dictionary
    :param queue_dict: queue dictionary list
    :param environment: name of environment
    :param client_profile: name of client_profile, default='glassfish'
    :param users: list of user dictionaries to provision
        eg: [{'username': u'dev_marcom3', 'password': u'dev_marcompass'}]
    :param testmode: only test, don't apply changes
    :param create_queues: disable queue creation, default = True
    :param shutdown_on_apply: force shutdown Queue and User for config change, default = False
    """
    def __init__(self, **kwargs):
        # Stage all SEMP commands (VPN, client profile, ACL profile, users,
        # queues) and then apply them in dependency order via RPC, unless
        # testmode suppresses the actual RPC calls.
        if kwargs == {}:
            return
        try:
            self.vpn_dict = kwargs['vpn_dict']
            self.vpn_name = self.vpn_dict['name']
            self.queue_dict = kwargs['queue_dict']
            self.environment_name = kwargs['environment']
            self.client_profile_name = kwargs['client_profile']
            self.users_dict = kwargs['users_dict']
            logging.debug("USERS_DICT: %s" % self.users_dict )
            self.testmode = kwargs['testmode']
            self.create_queues = kwargs['create_queues']
            self.shutdown_on_apply = kwargs['shutdown_on_apply']
            self.version = kwargs['version']
            self.detect_status = kwargs['detect_status']
        except Exception, e:
            # All kwargs above are mandatory; surface the missing one by name.
            raise KeyError('missing kwarg %s' % e)
        logging.info("vpn_dict: %s" % self.vpn_dict)
        logging.info("vpn_name: %s" % self.vpn_name)
        logging.info("Command line SolOS-TR version override: %s" % self.version)
        self.queueMgr = None
        if self.testmode:
            logging.info('TESTMODE ACTIVE')
        # create a connection for RPC calls to the environment
        self.connection = SolaceAPI(self.environment_name, testmode=self.testmode, version=self.version,
                                    detect_status=self.detect_status)
        # get version of semp TODO FIXME, this should not be needed after Plugin implemented
        if self.version is None:
            self.version = self.connection.version
        else:
            logging.warn("Overriding default semp version %s" % self.version)
            self.version = self.version
        logging.debug("VPN Data Node: %s" % json.dumps(str(self.vpn_dict), ensure_ascii=False))
        # prepare vpn commands
        # NOTE(review): spool_size is read from vpn_dict['vpn_config'], not the
        # top-level 'spool_size' key shown in the class docstring example --
        # confirm the CMDB data shape.
        self.vpn = self.connection.manage("SolaceVPN",
                                          vpn_name=self.vpn_name,
                                          owner_name=self.vpn_name,
                                          max_spool_usage=self.vpn_dict['vpn_config']['spool_size'])
        logging.info("Create VPN %s" % self.vpn_name)
        # The VPN is created immediately because later objects reference it.
        for cmd in self.vpn.commands.commands:
            logging.debug(str(cmd))
            if not self.testmode:
                self.connection.rpc(str(cmd[0]), **cmd[1])
        # prepare the client_profile commands
        self.client_profile = self.connection.manage("SolaceClientProfile", name=self.client_profile_name,
                                                     vpn_name=self.vpn_name, version=self.version)
        # prepare acl_profile commands, we create a profile named the same as the VPN for simplicity
        # self.acl_profile = SolaceACLProfile(self.environment_name, self.vpn_name, self.vpn_name, version=self.version)
        self.acl_profile = self.connection.manage("SolaceACLProfile", name=self.vpn_name, vpn_name=self.vpn_name)
        # prepare the user that owns this vpn
        logging.info("self.vpn_name: %s" % self.vpn_name)
        if not self._is_vpn_owner_user_present():
            logging.debug("VPN owner user %s for VPN %s not present in CMDB, appending to the list of users to be created" % (self.vpn_name, self.vpn_name))
            vpn_owner_user = [
                {
                    'username': self.vpn_name,
                    'password': self.vpn_dict['password']
                }
            ]
            self.users_dict.extend(vpn_owner_user)
        self.userMgr = self.connection.manage("SolaceUsers",
                                              users=self.users_dict,
                                              vpn_name=self.vpn_name,
                                              client_profile=self.client_profile.name,
                                              acl_profile=self.acl_profile.name,
                                              testmode=self.testmode,
                                              shutdown_on_apply=self.shutdown_on_apply)
        # self.users = [self.connection.manage("SolaceUser",
        #                               username = self.vpn_name,
        #                               password = self.vpn_dict['password'],
        #                               vpn_name = self.vpn_name,
        #                               client_profile = self.client_profile.name,
        #                               acl_profile = self.acl_profile.name,
        #                               testmode = self.testmode,
        #                               shutdown_on_apply = self.shutdown_on_apply)]
        #
        # logging.info("self.users: %s" % self.users)
        # prepare the queues for the vpn ( if any )
        try:
            logging.info("Queue datanodes %s" % self.queue_dict)
            if self.queue_dict is not None:
                try:
                    logging.info("Stacking queue commands for VPN: %s" % self.vpn_name)
                    self.queueMgr = self.connection.manage("SolaceQueue",
                                                           vpn_name=self.vpn_name,
                                                           queues=self.queue_dict,
                                                           shutdown_on_apply=self.shutdown_on_apply)
                except Exception, e:
                    raise
                    # raise BaseException("Something bad has happened which was unforseen by developers: %s" % e)
            else:
                logging.warning("No Queue dictionary was passed, disabling queue creation")
                self.create_queues = False
        except AttributeError:
            logging.warning("No queue declaration for this vpn in site-config, skipping")
            self.create_queues = False
            raise
        logging.info("Create Client Profile")
        # Provision profile now already since we need to link to it.
        for cmd in self.client_profile.commands.commands:
            logging.debug(str(cmd))
            if not self.testmode:
                self.connection.rpc(str(cmd[0]), **cmd[1])
        logging.info("Create ACL Profile for vpn %s" % self.vpn_name)
        for cmd in self.acl_profile.commands.commands:
            logging.debug(str(cmd))
            if not self.testmode:
                self.connection.rpc(str(cmd[0]), **cmd[1])
        logging.info("Creating users for vpn %s" % self.vpn_name)
        for cmd in self.userMgr.commands.commands:
            logging.debug(cmd)
            if not self.testmode:
                self.connection.rpc(str(cmd[0]), **cmd[1])
        logging.info("Create Queues Bool?: %s in %s" % (self.create_queues, self.vpn_name))
        if self.create_queues:
            logging.info("Create Queues for vpn %s" % self.vpn_name)
            for cmd in self.queueMgr.commands.commands:
                logging.debug(cmd)
                if not self.testmode:
                    self.connection.rpc(str(cmd[0]), **cmd[1])
    def __set_vpn_confg__(self):
        # NOTE(review): name contains a typo ("confg") -- kept because callers
        # outside this file may reference it.
        """Apply any environment-specific vpn_config override from the vpn node."""
        try:
            # Check if there is environment overide for VPN
            for e in self.vpn_dict.env:
                if e.name == self.environment_name:
                    logging.info('setting vpn_config to %s values' % e.name)
                    self.vpn_dict.vpn_config = e.vpn_config
                    logging.info("Spool Size: %s" % self.vpn_dict.vpn_config['spool_size'])
        except:
            logging.warning("No environment overides for vpn: %s" % self.vpn_dict.name)
            pass
    def _is_vpn_owner_user_present(self):
        """
        Checks if the special vpn_owner user is already configured in the CMDB
        Returns boolean
        """
        return True in [True for x in self.users_dict if x["username"] == self.vpn_name]
| |
# Copyright 2013-2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for :mod:`dicts`"""
import copy
import re
import unittest
import mock
import six
from simpl.incubator import dicts
class TestSplitMergeDicts(unittest.TestCase):
    """Tests for split/merge dicts in :mod:`dicts`."""
    def test_split_dict_simple(self):
        # split_dict returns (clean, secrets); the input must not be mutated.
        fxn = dicts.split_dict
        self.assertEqual(fxn({}), ({}, None))
        combined = {
            'innocuous': 'Hello!',
            'password': 'secret',
        }
        innocuous = {'innocuous': 'Hello!'}
        secret = {'password': 'secret'}
        original = combined.copy()
        self.assertEqual(fxn(combined, filter_keys=[]), (combined, None))
        self.assertEqual(fxn(combined, ['password']), (innocuous, secret))
        self.assertDictEqual(combined, original)
    def test_split_dict_works_with_None_keys(self):
        # A None key must not crash regex-based filter matching.
        filter_keys = [re.compile('quux')]
        data = {None: 'foobar'}
        expected = (data, None)
        self.assertEqual(expected,
                         dicts.split_dict(data, filter_keys))
    def test_extract_data_expression_as_filter(self):
        # Regex filter keys match nested keys; merge restores the original.
        data = {
            "employee": {
                "name": "Bob",
                "title": "Mr.",
                "public_key": "rsa public key",
                "private_key": "a private key",
                "password": "password",
                "position": "left"
            },
            "server": {
                "access": {
                    "rootpassword": "password",
                    "server_privatekey": "private_key",
                    "server_public_key": "public_key"
                },
                "private_ip": "123.45.67.89",
                "public_ip": "127.0.0.1",
                "host_name": "server1"
            },
            "safe_val": "hithere",
            "secret_value": "Immasecret"
        }
        safe = {
            "employee": {
                "name": "Bob",
                "title": "Mr.",
                "public_key": "rsa public key",
                "position": "left"
            },
            "server": {
                "access": {
                    "server_public_key": "public_key"
                },
                "private_ip": "123.45.67.89",
                "public_ip": "127.0.0.1",
                "host_name": "server1"
            },
            "safe_val": "hithere",
        }
        secret = {
            "employee": {
                "private_key": "a private key",
                "password": "password",
            },
            "server": {
                "access": {
                    "rootpassword": "password",
                    "server_privatekey": "private_key",
                }
            },
            "secret_value": "Immasecret"
        }
        original_dict = copy.deepcopy(data)
        secret_keys = ["secret_value", re.compile("password"),
                       re.compile("priv(?:ate)?[-_ ]?key$")]
        body, hidden = dicts.split_dict(data, secret_keys)
        self.assertDictEqual(body, safe)
        self.assertDictEqual(secret, hidden)
        dicts.merge_dictionary(body, hidden)
        self.assertDictEqual(original_dict, body)
    def test_split_dict_complex(self):
        # Secrets inside lists are replaced positionally (None / {}) so that a
        # later merge can put them back in the right slots.
        fxn = dicts.split_dict
        self.assertEqual(fxn({}), ({}, None))
        combined = {
            'innocuous': {
                'names': ['Tom', 'Richard', 'Harry']
            },
            'data': {
                'credentials': [{'password': 'secret', 'username': 'joe'}],
                'id': 1000,
                'list_with_only_cred_objects': [{'password': 'secret'}],
                'list_with_some_cred_objects': [
                    {
                        'password': 'secret',
                        'type': 'password',
                    },
                    'scalar',
                    {'name': 'joe'}
                ]
            }
        }
        innocuous = {
            'innocuous': {
                'names': ['Tom', 'Richard', 'Harry']
            },
            'data': {
                'id': 1000,
                'list_with_some_cred_objects': [
                    {
                        'type': 'password'
                    },
                    'scalar',
                    {'name': 'joe'}
                ]
            }
        }
        secret = {
            'data': {
                'credentials': [{'password': 'secret', 'username': 'joe'}],
                'list_with_only_cred_objects': [{'password': 'secret'}],
                'list_with_some_cred_objects': [
                    {
                        'password': 'secret'
                    },
                    None,
                    {}
                ]
            }
        }
        original = combined.copy()
        not_secret, is_secret = fxn(combined, [])
        self.assertDictEqual(not_secret, combined)
        self.assertIsNone(is_secret)
        not_secret, is_secret = fxn(combined, ['credentials', 'password'])
        self.assertDictEqual(not_secret, innocuous)
        self.assertDictEqual(is_secret, secret)
        self.assertDictEqual(combined, original)
        merged = dicts.merge_dictionary(innocuous, secret)
        self.assertDictEqual(original, merged)
    def test_extract_and_merge(self):
        # Empty containers and None values survive a split/merge round trip.
        fxn = dicts.split_dict
        data = {
            'empty_list': [],
            'empty_object': {},
            'null': None,
            'list_with_empty_stuff': [{}, None, []],
            'object_with_empty_stuff': {"o": {}, "n": None, 'l': []},
            "tree": {
                "array": [
                    {
                        "blank": {},
                        "scalar": 1
                    }
                ]
            }
        }
        result, _ = fxn(data, [])
        self.assertDictEqual(data, result)
        merge = dicts.merge_dictionary(data, data)
        self.assertDictEqual(data, merge)
        merge = dicts.merge_dictionary(data, {})
        self.assertDictEqual(data, merge)
        merge = dicts.merge_dictionary({}, data)
        self.assertDictEqual(data, merge)
    def test_merge_dictionary(self):
        # Deep merge: src wins on conflicts, dst sub-dicts are reused in place.
        dst = dict(
            a=1,  # not in source
            b=2,  # changed by source
            c=dict(  # deep merge check
                ca=31,
                cc=33,
                cd=dict(cca=1)
            ),
            d=4,
            f=6,
            g=7,
            i=[],  # add to empty list
            k=[3, 4],
            l=[[], [{'s': 1}]]
        )
        src = dict(
            b='u2',
            c=dict(
                cb='u32',
                cd=dict(
                    cda=dict(
                        cdaa='u3411',
                        cdab='u3412'
                    )
                )
            ),
            e='u5',
            h=dict(i='u4321'),
            i=[1],
            j=[1, 2],
            l=[None, [{'t': 8}]]
        )
        result = dicts.merge_dictionary(dst, src)
        self.assertIsInstance(result, dict)
        self.assertEqual(result['a'], 1)
        self.assertEqual(result['d'], 4)
        self.assertEqual(result['f'], 6)
        self.assertEqual(result['b'], 'u2')
        self.assertEqual(result['e'], 'u5')
        self.assertIs(result['c'], dst['c'])
        self.assertIs(result['c']['cd'], dst['c']['cd'])
        self.assertEqual(result['c']['cd']['cda']['cdaa'], 'u3411')
        self.assertEqual(result['c']['cd']['cda']['cdab'], 'u3412')
        self.assertEqual(result['g'], 7)
        self.assertIs(src['h'], result['h'])
        self.assertEqual(result['i'], [1])
        self.assertEqual(result['j'], [1, 2])
        self.assertEqual(result['k'], [3, 4])
        self.assertEqual(result['l'], [[], [{'s': 1, 't': 8}]])
    def test_merge_lists(self):
        # Element-wise merge; None entries defer to the other list's value.
        dst = [[], [2], [None, 4]]
        src = [[1], [], [3, None]]
        result = dicts.merge_lists(dst, src)
        self.assertIsInstance(result, list)
        self.assertEqual(result[0], [1])
        self.assertEqual(result[1], [2])
        self.assertEqual(result[2], [3, 4], "Found: %s" % result[2])
    def test_merge_dictionary_extend(self):
        # extend_lists=True concatenates lists instead of merging element-wise,
        # but identical lists are not duplicated (see 'e').
        dst = dict(
            a=[],
            b=[1],
            d=['a', 'b'],
            e=[1, 2, 3, 4]
        )
        src = dict(
            a=[1],  # append
            b=[1, 2],  # extend existing list
            c=[1],  # add new list
            d=[None, None, 'c'],
            e=[1, 2]
        )
        result = dicts.merge_dictionary(dst, src, extend_lists=True)
        self.assertIsInstance(result, dict)
        self.assertEqual(result['a'], [1])
        self.assertEqual(result['b'], [1, 2])
        self.assertEqual(result['c'], [1])
        self.assertEqual(result['d'], ['a', 'b', None, None, 'c'])
        self.assertEqual(result['e'], [1, 2, 3, 4])
class TestDictPaths(unittest.TestCase):
    """Tests for :mod:`dicts` functions using paths as keys."""
    def test_write_path(self):
        # Table-driven: write_path mutates the dict in place, creating
        # intermediate dicts for each '/'-separated path segment.
        cases = [
            {
                'name': 'scalar at root',
                'start': {},
                'path': 'root',
                'value': 'scalar',
                'expected': {'root': 'scalar'}
            }, {
                'name': 'int at root',
                'start': {},
                'path': 'root',
                'value': 10,
                'expected': {'root': 10}
            }, {
                'name': 'bool at root',
                'start': {},
                'path': 'root',
                'value': True,
                'expected': {'root': True}
            }, {
                'name': 'value at two piece path',
                'start': {},
                'path': 'root/subfolder',
                'value': True,
                'expected': {'root': {'subfolder': True}}
            }, {
                'name': 'value at multi piece path',
                'start': {},
                'path': 'one/two/three',
                'value': {},
                'expected': {'one': {'two': {'three': {}}}}
            }, {
                'name': 'add to existing',
                'start': {'root': {'exists': True}},
                'path': 'root/new',
                'value': False,
                'expected': {'root': {'exists': True, 'new': False}}
            }, {
                'name': 'overwrite existing',
                'start': {'root': {'exists': True}},
                'path': 'root/exists',
                'value': False,
                'expected': {'root': {'exists': False}}
            }
        ]
        for case in cases:
            result = case['start']
            dicts.write_path(result, case['path'], case['value'])
            self.assertEqual(result, case['expected'], msg=case['name'])
    def test_read_path(self):
        # read_path returns None for missing paths instead of raising;
        # leading/trailing slashes are tolerated.
        cases = [
            {
                'name': 'simple value',
                'start': {'root': 1},
                'path': 'root',
                'expected': 1
            }, {
                'name': 'simple path',
                'start': {'root': {'folder': 2}},
                'path': 'root/folder',
                'expected': 2
            }, {
                'name': 'blank path',
                'start': {'root': 1},
                'path': '',
                'expected': None
            }, {
                'name': '/ only',
                'start': {'root': 1},
                'path': '/',
                'expected': None
            }, {
                'name': 'extra /',
                'start': {'root': 1},
                'path': '/root/',
                'expected': 1
            }, {
                'name': 'nonexistent root',
                'start': {'root': 1},
                'path': 'not-there',
                'expected': None
            }, {
                'name': 'nonexistent path',
                'start': {'root': 1},
                'path': 'root/not/there',
                'expected': None
            }, {
                'name': 'empty source',
                'start': {},
                'path': 'root',
                'expected': None
            },
        ]
        for case in cases:
            result = dicts.read_path(case['start'], case['path'])
            self.assertEqual(result, case['expected'], msg=case['name'])
    def test_path_exists(self):
        # Note the asymmetry with read_path: '/' alone exists (True) but
        # reads as None, and a blank path does not exist.
        cases = [
            {
                'name': 'simple value',
                'start': {'root': 1},
                'path': 'root',
                'expected': True
            }, {
                'name': 'simple path',
                'start': {'root': {'folder': 2}},
                'path': 'root/folder',
                'expected': True
            }, {
                'name': 'blank path',
                'start': {'root': 1},
                'path': '',
                'expected': False
            }, {
                'name': '/ only',
                'start': {'root': 1},
                'path': '/',
                'expected': True
            }, {
                'name': 'extra /',
                'start': {'root': 1},
                'path': '/root/',
                'expected': True
            }, {
                'name': 'nonexistent root',
                'start': {'root': 1},
                'path': 'not-there',
                'expected': False
            }, {
                'name': 'nonexistent path',
                'start': {'root': 1},
                'path': 'root/not-there',
                'expected': False
            }, {
                'name': 'empty source',
                'start': {},
                'path': 'root',
                'expected': False
            },
        ]
        for case in cases:
            result = dicts.path_exists(case['start'], case['path'])
            self.assertEqual(result, case['expected'], msg=case['name'])
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| |
# IRI_v1 setup: copies the calibrated RHINO route network local, normalizes
# measure values, assigns a per-route segment order (RTE_ORDER) and a composite
# FLAG attribute, and indexes route-order keys to their DFO measure ranges.
# NOTE(review): assigning __file__ shadows the module's real file path.
__file__ = 'IRI_v1'
__date__ = '5/15/2014'
__author__ = 'ABREZNIC'
import arcpy, os, datetime, csv, tpp
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
# Date stamp, e.g. "2014_05_15".
today = curYear + "_" + curMonth + "_" + curDay
# Tool parameters: semicolon-separated input tables, calibrated RHINO source,
# and output folder. NOTE(review): 'input' shadows the builtin of that name.
input = arcpy.GetParameterAsText(0)
calRhino = arcpy.GetParameterAsText(1)
output = arcpy.GetParameterAsText(2)
# theMXD = "C:\\TxDOT\\Projects\\IRI_dan\\working\\Untitled.mxd"
inputlist = input.split(";")
inputcntr = 1
lengthinput = len(inputlist)
# Report accumulators; first row of each is the CSV header.
issuesReport = [["DISTRICT_FILE", "ROUTE_ID", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH", "IRI", "RUTTING", "DATE", "ERROR_DESCRIPTION"]]
statsReport = [["DISTRICT_FILE", "LG Record Count", "KG Record Count", "Total Records Count", "Input Record Count", "Lost Records Count", "LG Records Length", "KG Records Length", "Total Routed Length"]]
# Copy the calibrated RHINO lines into a local file geodatabase.
arcpy.CreateFileGDB_management(output, "RhinoLines.gdb")
rhinospace = output + os.sep + "RhinoLines.gdb"
rhino_lines = rhinospace + os.sep + "rhinolines"
arcpy.Copy_management(calRhino, rhino_lines)
# arcpy.AddField_management(rhino_lines, "FRM_DFO", "DOUBLE")
# arcpy.AddField_management(rhino_lines, "TO_DFO", "DOUBLE")
# Populate FRM_DFO/TO_DFO from the geometry's first/last M values,
# rounded to 3 decimal places.
cursor = arcpy.da.UpdateCursor(rhino_lines, ["FRM_DFO", "TO_DFO", 'SHAPE@'])
for row in cursor:
    bp = row[2].firstPoint.M
    ep = row[2].lastPoint.M
    bpNew = float(format(float(bp), '.3f'))
    epNew = float(format(float(ep), '.3f'))
    row[0] = bpNew
    row[1] = epNew
    cursor.updateRow(row)
del cursor
del row
arcpy.AddMessage("Calibrated RHINO copied local.")
arcpy.AddField_management(rhino_lines, "RTE_ORDER", "SHORT")
arcpy.AddField_management(rhino_lines, "FLAG", "TEXT", "", "", 30)
arcpy.AddMessage("Applying RTE_ORDER.")
# Walk segments sorted by route then measure; RTE_ORDER restarts at 1 for
# each new RTE_ID and increments along the route.
cursor = arcpy.da.UpdateCursor(rhino_lines, ["RTE_ID", "FRM_DFO", "RTE_ORDER", "FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"], "", "", "", (None, "ORDER BY RTE_ID ASC, FRM_DFO ASC"))
counter = 0
order = 1
previous = ""
for row in cursor:
    current = row[0]
    if counter == 0:
        row[2] = order
    elif counter != 0 and previous == current:
        order += 1
        row[2] = order
    else:
        order = 1
        row[2] = order
    previous = current
    counter += 1
    ru = int(row[4])
    fs = int(row[5])
    nhs = int(row[6])
    # FLAG packs RTE_ID-order-RU-F_SYSTEM-SEC_NHS-HPMS into one "-"-joined key.
    row[3] = current + "-" + str(order) + "-" + str(ru) + "-" + str(fs) + "-" + str(nhs) + "-" + str(row[7])
    cursor.updateRow(row)
del cursor
arcpy.AddMessage("RTE_ORDER applied.")
# Index "RTE_ID-order-RU" (first three FLAG parts) -> [FRM_DFO, TO_DFO]
# for measure lookups later in the run.
dictionary = {}
cursor = arcpy.da.SearchCursor(rhino_lines, ["FLAG", "FRM_DFO", "TO_DFO"])
for row in cursor:
    flag = row[0]
    odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
    fDFO = row[1]
    tDFO = row[2]
    dictionary[odr] = [fDFO, tDFO]
del cursor
for excel in inputlist:
distName = str(excel).split("\\")[-1]
if distName[-1] == "$":
distName = distName[:-1]
arcpy.AddMessage("Beginning " + str(inputcntr) + " of " + str(lengthinput) + ": " + distName)
arcpy.CreateFileGDB_management(output, "Wrkg" + str(inputcntr) + ".gdb")
workspace = output + os.sep + "Wrkg" + str(inputcntr) + ".gdb"
arcpy.AddMessage("Working database created.")
data = []
lg = []
fields = ["ROUTE_ID", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH", "IRI", "RUTTING", "DATE", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"]
data.append(fields)
lg.append(fields)
# spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\GCS_WGS_1984.prj"
spref = "Coordinate Systems\\Geographic Coordinate Systems\\World\\WGS 1984.prj"
arcpy.MakeXYEventLayer_management(excel, "Long", "Lat", "pointEvents" + str(inputcntr), spref)
arcpy.AddMessage("Event Layer created.")
pntfeature = workspace + os.sep + "allPoints"
arcpy.CopyFeatures_management("pointEvents" + str(inputcntr), pntfeature)
arcpy.AddMessage("Point feature class created.")
initial = 0
ids = []
cursor = arcpy.da.SearchCursor(pntfeature, ["ROUTE_ID"])
for row in cursor:
id = row[0]
initial += 1
if id not in ids:
ids.append(id)
del cursor
del row
arcpy.AddMessage("RTE_IDs compiled.")
roadslayer = ""
pointslayer = ""
# mxd = arcpy.mapping.MapDocument(theMXD)
mxd = arcpy.mapping.MapDocument("CURRENT")
df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.name == "rhinolines":
arcpy.mapping.RemoveLayer(df, lyr)
if lyr.name == "allPoints":
arcpy.mapping.RemoveLayer(df, lyr)
newlayerpnt = arcpy.mapping.Layer(pntfeature)
arcpy.mapping.AddLayer(df, newlayerpnt)
newlayerline = arcpy.mapping.Layer(rhino_lines)
arcpy.mapping.AddLayer(df, newlayerline)
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.name == "rhinolines":
roadslayer = lyr
if lyr.name == "allPoints":
pointslayer = lyr
arcpy.AddMessage("Layers acquired.")
counter = 1
total = len(ids)
arcpy.AddMessage("Finding measures for: ")
for id in ids:
roadslayer.definitionQuery = " RTE_ID = '" + id + "' "
pointslayer.definitionQuery = " ROUTE_ID = '" + id + "' "
arcpy.RefreshActiveView()
arcpy.AddMessage(str(counter) + "/" + str(total) + " " + id)
label = id.replace("-", "")
arcpy.LocateFeaturesAlongRoutes_lr(pointslayer, roadslayer, "FLAG", "230 Feet", workspace + os.sep + label, "FLAG POINT END_POINT")
counter += 1
arcpy.AddMessage("Tables created.")
# alltables = []
arcpy.env.workspace = workspace
tables = arcpy.ListTables()
for table in tables:
arcpy.AddMessage(table)
arcpy.AddField_management(table, "ODR_FLAG", "TEXT", "", "", 20)
arcpy.AddMessage("Order Flag field created.")
numbDict = {}
cursor = arcpy.da.UpdateCursor(table, ["FLAG", "ODR_FLAG"])
for row in cursor:
flag = row[0]
odr = flag.split("-")[0] + "-" + flag.split("-")[1] + "-" + flag.split("-")[2]
if odr not in numbDict.keys():
numbDict[odr] = 1
else:
curNumb = numbDict[odr]
curNumb += 1
numbDict[odr] = curNumb
row[1] = odr
cursor.updateRow(row)
del cursor
counter = 1
previous = ""
last = ""
cursor = arcpy.da.UpdateCursor(table, ["ODR_FLAG", "BEGIN_POINT", "END_POINT", "SECTION_LENGTH"], None, None, False, (None, "ORDER BY ODR_FLAG ASC, END_POINT ASC"))
for row in cursor:
current = row[0]
total = numbDict[current]
if counter == 1 and counter != total:
values = dictionary[current]
beginner = float(format(float(values[0]), '.3f'))
segEnd = float(format(float(row[2]), '.3f'))
if abs(segEnd - beginner) > 1:
segSrt = segEnd - .1
row[1] = float(format(float(segSrt), '.3f'))
row[2] = segEnd
row[3] = row[2] - row[1]
else:
row[1] = beginner
row[2] = segEnd
row[3] = row[2] - row[1]
elif counter == 1 and counter == total:
values = dictionary[current]
row[1] = float(format(float(values[0]), '.3f'))
row[2] = float(format(float(values[1]), '.3f'))
row[3] = row[2] - row[1]
counter = 0
elif previous == current and counter != total:
row[1] = last
row[2] = float(format(float(row[2]), '.3f'))
row[3] = row[2] - last
elif previous == current and counter == total:
values = dictionary[current]
ender = float(format(float(values[1]), '.3f'))
if abs(ender - last) > 1:
row[1] = last
row[2] = float(format(float(row[2]), '.3f'))
row[3] = row[2] - last
else:
row[1] = last
row[2] = float(format(float(values[1]), '.3f'))
row[3] = row[2] - last
counter = 0
else:
arcpy.AddMessage("problem with " + current)
last = row[2]
cursor.updateRow(row)
previous = current
counter += 1
del cursor
arcpy.AddMessage("Measure difference fields populated.")
arcpy.Merge_management(tables, workspace + os.sep + "merged")
arcpy.AddMessage("All tables merged successfully.")
arcpy.AddField_management(workspace + os.sep + "merged", "RU", "TEXT", "", "", 5)
arcpy.AddMessage("RU field created.")
arcpy.AddField_management(workspace + os.sep + "merged", "F_SYSTEM", "TEXT", "", "", 5)
arcpy.AddMessage("Functional System field created.")
arcpy.AddField_management(workspace + os.sep + "merged", "SEC_NHS", "TEXT", "", "", 5)
arcpy.AddMessage("NHS field created.")
arcpy.AddField_management(workspace + os.sep + "merged", "HPMS", "TEXT", "", "", 5)
arcpy.AddMessage("HPMS Keeper field created.")
# arcpy.AddMessage("Fields created.")
cursor = arcpy.da.UpdateCursor(workspace + os.sep + "merged", ["FLAG", "RU", "F_SYSTEM", "SEC_NHS", "HPMS"])
for row in cursor:
flag = row[0]
row[1] = flag.split("-")[3]
row[2] = flag.split("-")[4]
row[3] = flag.split("-")[5]
row[4] = flag.split("-")[6]
cursor.updateRow(row)
del cursor
LGcounter = 0
KGcounter = 0
LGlength = 0
KGlength = 0
cursor = arcpy.da.SearchCursor(workspace + os.sep + "merged", fields)
for row in cursor:
id = row[0]
if id[-2:] == "LG":
lg.append(row)
LGcounter += 1
LGlength += float(row[3])
elif id[-2:] == "RG":
THEid = id[:-2]
newid = THEid + "KG"
fixed = [newid, row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10]]
data.append(fixed)
KGcounter += 1
KGlength += float(row[3])
if float(row[3]) > 1:
problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Abnormally large SECTION_LENGTH"]
issuesReport.append(problem)
if float(row[3]) == 0:
problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Zero length SECTION_LENGTH"]
issuesReport.append(problem)
else:
data.append(row)
KGcounter += 1
KGlength += float(row[3])
if float(row[3]) > 1:
problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Abnormally large SECTION_LENGTH"]
issuesReport.append(problem)
if float(row[3]) == 0:
problem = [distName, newid, row[1], row[2], row[3], row[4], row[5], row[6], "Zero length SECTION_LENGTH"]
issuesReport.append(problem)
del cursor
arcpy.AddMessage("Data compiled.")
arcpy.AddMessage("Creating CSV report.")
leftover = open(output + os.sep + distName + "_LG_RECS_" + str(inputcntr) + ".csv", 'wb')
writer = csv.writer(leftover)
writer.writerows(lg)
leftover.close()
final = open(output + os.sep + distName + "_Plotted_" + str(inputcntr) + ".csv", 'wb')
writer = csv.writer(final)
writer.writerows(data)
final.close()
arcpy.AddMessage("CSV written.")
TOTALcounter = LGcounter + KGcounter
TOTALlength = LGlength + KGlength
DIFFcounter = initial - TOTALcounter
statsReport.append([distName, LGcounter, KGcounter, TOTALcounter, initial, DIFFcounter, LGlength, KGlength, TOTALlength])
inputcntr += 1
if len(issuesReport) > 1:
arcpy.AddMessage("Creating errors report...")
errors = open(output + os.sep + "00ISSUES_Investigate.csv", 'wb')
writer = csv.writer(errors)
writer.writerows(issuesReport)
errors.close()
arcpy.AddMessage("Creating stats report...")
stats = open(output + os.sep + "00Statistics.csv", 'wb')
writer = csv.writer(stats)
writer.writerows(statsReport)
stats.close()
arcpy.AddMessage("that's all folks!")
arcpy.AddMessage("started: " + str(now))
now2 = datetime.datetime.now()
arcpy.AddMessage("ended: " + str(now2))
print "that's all folks!"
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Asserts and Boolean Checks.
See the @{$python/check_ops} guide.
@@assert_negative
@@assert_positive
@@assert_non_negative
@@assert_non_positive
@@assert_equal
@@assert_none_equal
@@assert_near
@@assert_less
@@assert_less_equal
@@assert_greater
@@assert_greater_equal
@@assert_rank
@@assert_rank_at_least
@@assert_rank_in
@@assert_type
@@assert_integer
@@assert_proper_iterable
@@assert_same_float_dtype
@@assert_scalar
@@is_non_decreasing
@@is_numeric_tensor
@@is_strictly_increasing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Dtypes this module treats as "numeric".
# NOTE(review): presumably the whitelist consumed by `is_numeric_tensor`
# (defined later in this file, outside this chunk); float16, complex128 and
# the wider unsigned integer types are absent -- confirm that is intentional.
NUMERIC_TYPES = frozenset(
    [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
     dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
     dtypes.complex64])
# Public API of this module; mirrors the @@-directives in the module docstring.
__all__ = [
    'assert_negative',
    'assert_positive',
    'assert_proper_iterable',
    'assert_non_negative',
    'assert_non_positive',
    'assert_equal',
    'assert_none_equal',
    'assert_near',
    'assert_integer',
    'assert_less',
    'assert_less_equal',
    'assert_greater',
    'assert_greater_equal',
    'assert_rank',
    'assert_rank_at_least',
    'assert_rank_in',
    'assert_same_float_dtype',
    'assert_scalar',
    'assert_type',
    'is_non_decreasing',
    'is_numeric_tensor',
    'is_strictly_increasing',
]
def _maybe_constant_value_string(t):
  """Returns the best string representation available for `t`.

  Args:
    t: Any object; typically a `Tensor` or an element of an assertion's
      `data` list.

  Returns:
    `str(t)` for non-Tensors, the stringified constant-folded value for
    Tensors whose value is statically known, and `str(t)` otherwise.
  """
  if not isinstance(t, ops.Tensor):
    return str(t)
  const_t = tensor_util.constant_value(t)
  if const_t is not None:
    return str(const_t)
  # Bug fix: the original returned the Tensor object itself here, which made
  # the caller's '\n'.join(...) in _assert_static raise TypeError instead of
  # producing the intended InvalidArgumentError message.
  return str(t)
def _assert_static(condition, data):
"""Raises a InvalidArgumentError with as much information as possible."""
if not condition:
data_static = [_maybe_constant_value_string(x) for x in data]
raise errors.InvalidArgumentError(node_def=None, op=None,
message='\n'.join(data_static))
def _shape_and_dtype_str(tensor):
"""Returns a string containing tensor's shape and dtype."""
return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)
@tf_export('assert_proper_iterable')
def assert_proper_iterable(values):
  """Static assert that `values` is a "proper" iterable.

  `Ops` that expect iterables of `Tensor` can call this to validate input.
  Useful since `Tensor`, `ndarray`, and byte/text types all happen to be
  iterable themselves.

  Args:
    values: Object to be checked.

  Raises:
    TypeError: If `values` is not iterable or is one of
      `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
  """
  # Types that iterate (over elements or characters) but are almost never
  # what a caller means by "an iterable of tensors".
  accidental_iterables = (
      (ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
      + compat.bytes_or_text_types)
  if isinstance(values, accidental_iterables):
    raise TypeError(
        'Expected argument "values" to be a "proper" iterable. Found: %s' %
        type(values))
  if not hasattr(values, '__iter__'):
    raise TypeError(
        'Expected argument "values" to be iterable. Found: %s' % type(values))
@tf_export('assert_negative')
def assert_negative(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x < 0` holds element-wise.

  Every element `x[i]` of `x` must satisfy `x[i] < 0`; an empty `x` passes
  trivially.

  Typical usage, adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_negative(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all negative.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # In eager mode tensors have no graph name; describe by shape/dtype.
      x_label = _shape_and_dtype_str(x) if context.in_eager_mode() else x.name
      data = [
          message,
          'Condition x < 0 did not hold element-wise:',
          'x (%s) = ' % x_label, x]
    upper_bound = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less(x, upper_bound, data=data, summarize=summarize)
@tf_export('assert_positive')
def assert_positive(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x > 0` holds element-wise.

  Every element `x[i]` of `x` must satisfy `x[i] > 0`; an empty `x` passes
  trivially.

  Typical usage, adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_positive(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all positive.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # In eager mode tensors have no graph name; describe by shape/dtype.
      x_label = _shape_and_dtype_str(x) if context.in_eager_mode() else x.name
      data = [
          message, 'Condition x > 0 did not hold element-wise:',
          'x (%s) = ' % x_label, x]
    lower_bound = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less(lower_bound, x, data=data, summarize=summarize)
@tf_export('assert_non_negative')
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x >= 0` holds element-wise.

  Every element `x[i]` of `x` must satisfy `x[i] >= 0`; an empty `x` passes
  trivially.

  Typical usage, adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_non_negative(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_non_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-negative.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_non_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # In eager mode tensors have no graph name; describe by shape/dtype.
      x_label = _shape_and_dtype_str(x) if context.in_eager_mode() else x.name
      data = [
          message,
          'Condition x >= 0 did not hold element-wise:',
          'x (%s) = ' % x_label, x]
    lower_bound = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less_equal(lower_bound, x, data=data, summarize=summarize)
@tf_export('assert_non_positive')
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x <= 0` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_non_positive(x)]):
    output = tf.reduce_sum(x)
  ```

  Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`.
  If `x` is empty this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_non_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-positive.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_non_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      if context.in_eager_mode():
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      # Bug fix: a missing comma after the condition string implicitly
      # concatenated it with the 'x (%s) = ' literal, collapsing two message
      # entries into one (cf. assert_negative/assert_non_negative).
      data = [
          message,
          'Condition x <= 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less_equal(x, zero, data=data, summarize=summarize)
@tf_export('assert_equal')
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x == y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] == y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x == y` is False.
    @compatibility{eager} returns None

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x == y` is False. The check can be performed immediately during
      eager execution or if `x` and `y` are statically known.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.in_eager_mode():
      # Eager mode: evaluate the condition now and raise directly instead of
      # returning an assert op.
      eq = math_ops.equal(x, y)
      condition = math_ops.reduce_all(eq)
      if not condition:
        # Prepare a message with first elements of x and y.
        summary_msg = ''
        # Default to printing 3 elements like control_flow_ops.Assert (used
        # by graph mode) does.
        summarize = 3 if summarize is None else summarize
        if summarize:
          # reshape((-1,)) is the fastest way to get a flat array view.
          x_np = x.numpy().reshape((-1,))
          y_np = y.numpy().reshape((-1,))
          x_sum = min(x_np.size, summarize)
          y_sum = min(y_np.size, summarize)
          summary_msg = ('First %d elements of x:\n%s\n'
                         'First %d elements of y:\n%s\n' %
                         (x_sum, x_np[:x_sum],
                          y_sum, y_np[:y_sum]))
        # Get the values that actually differed and their indices.
        mask = math_ops.logical_not(eq)
        indices = array_ops.where(mask)
        indices_np = indices.numpy()
        x_vals = array_ops.boolean_mask(x, mask)
        y_vals = array_ops.boolean_mask(y, mask)
        # Cap the number of reported mismatches at the actual count.
        summarize = min(summarize, indices_np.shape[0])
        raise errors.InvalidArgumentError(
            node_def=None, op=None,
            message=('%s\nCondition x == y did not hold.\n'
                     'Indices of first %s different values:\n%s\n'
                     'Corresponding x values:\n%s\n'
                     'Corresponding y values:\n%s\n'
                     '%s'
                     %
                     (message or '',
                      summarize, indices_np[:summarize],
                      x_vals.numpy().reshape((-1,))[:summarize],
                      y_vals.numpy().reshape((-1,))[:summarize],
                      summary_msg)))
      # Condition held; nothing to return in eager mode.
      return
    if data is None:
      data = [
          message,
          'Condition x == y did not hold element-wise:',
          'x (%s) = ' % x.name, x,
          'y (%s) = ' % y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.equal(x, y))
    # If both values are statically known, check right away at graph build.
    x_static = tensor_util.constant_value(x)
    y_static = tensor_util.constant_value(y)
    if x_static is not None and y_static is not None:
      # NOTE(review): assumes the static values support elementwise `==` with
      # `.all()` (numpy arrays); broadcasting applies -- confirm.
      condition_static = (x_static == y_static).all()
      _assert_static(condition_static, data)
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_none_equal')
def assert_none_equal(
    x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x != y` holds for all elements.

  Every pair of (possibly broadcast) elements `x[i]`, `y[i]` must satisfy
  `x[i] != y[i]`; if both `x` and `y` are empty this passes trivially.

  Typical usage, adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_none_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_none_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x != y` is ever False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_none_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    # In eager mode tensors have no graph name; describe by shape/dtype.
    if context.in_eager_mode():
      x_label, y_label = _shape_and_dtype_str(x), _shape_and_dtype_str(y)
    else:
      x_label, y_label = x.name, y.name
    if data is None:
      data = [
          message,
          'Condition x != y did not hold for every single element:',
          'x (%s) = ' % x_label, x,
          'y (%s) = ' % y_label, y
      ]
    condition = math_ops.reduce_all(math_ops.not_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_near')
def assert_near(
    x, y, rtol=None, atol=None, data=None, summarize=None, message=None,
    name=None):
  """Assert the condition `x` and `y` are close element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_near(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have
  ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```.
  If both `x` and `y` are empty, this is trivially satisfied.

  The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != eps`. This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x: Float or complex `Tensor`.
    y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`.
    rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The relative tolerance. Default is `10 * eps`.
    atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The absolute tolerance. Default is `10 * eps`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.

  @compatibility(numpy)
  Similar to `numpy.assert_allclose`, except tolerance depends on data type.
  This is due to the fact that `TensorFlow` is often used with `32bit`,
  `64bit`, and even `16bit` data.
  @end_compatibility
  """
  message = message or ''
  with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y', dtype=x.dtype)
    # Tolerances default to 10 * eps for x's dtype (see docstring).
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    rtol = 10 * eps if rtol is None else rtol
    atol = 10 * eps if atol is None else atol
    rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=x.dtype)
    atol = ops.convert_to_tensor(atol, name='atol', dtype=x.dtype)
    if context.in_eager_mode():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      data = [
          message,
          'x and y not equal to tolerance rtol = %s, atol = %s' % (rtol, atol),
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    tol = atol + rtol * math_ops.abs(y)
    diff = math_ops.abs(x - y)
    # NOTE(review): uses strict `less` although the docstring advertises `<=`,
    # so values exactly at tolerance fail the check -- confirm intent before
    # changing.
    condition = math_ops.reduce_all(math_ops.less(diff, tol))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_less')
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x < y` holds element-wise.

  Every pair of (possibly broadcast) elements `x[i]`, `y[i]` must satisfy
  `x[i] < y[i]`; if both `x` and `y` are empty this passes trivially.

  Typical usage, adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less(x, y)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_less".

  Returns:
    Op that raises `InvalidArgumentError` if `x < y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_less', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    # In eager mode tensors have no graph name; describe by shape/dtype.
    if context.in_eager_mode():
      x_label, y_label = _shape_and_dtype_str(x), _shape_and_dtype_str(y)
    else:
      x_label, y_label = x.name, y.name
    if data is None:
      data = [
          message,
          'Condition x < y did not hold element-wise:',
          'x (%s) = ' % x_label, x, 'y (%s) = ' % y_label, y
      ]
    condition = math_ops.reduce_all(math_ops.less(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_less_equal')
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.in_eager_mode():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      # Bug fix: a missing comma after the condition string implicitly
      # concatenated it with the 'x (%s) = ' literal, collapsing two message
      # entries into one (cf. assert_less, which has the comma).
      data = [
          message,
          'Condition x <= y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_greater')
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x > y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_greater(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] > y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_greater".

  Returns:
    Op that raises `InvalidArgumentError` if `x > y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_greater', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.in_eager_mode():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      # Bug fix: a missing comma after the condition string implicitly
      # concatenated it with the 'x (%s) = ' literal, collapsing two message
      # entries into one (cf. assert_less, which has the comma).
      data = [
          message,
          'Condition x > y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.greater(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_greater_equal')
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
                         name=None):
  """Assert the condition `x >= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] >= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_greater_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x >= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.in_eager_mode():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      # Bug fix: a missing comma after the condition string implicitly
      # concatenated it with the 'x (%s) = ' literal, collapsing two message
      # entries into one (cf. assert_less, which has the comma).
      data = [
          message,
          'Condition x >= y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
def _assert_rank_condition(
    x, rank, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    static_condition: A python function that takes `[actual_rank, given_rank]`
      and returns `True` if the condition is satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_rank] and
      returns `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  assert_type(rank, dtypes.int32)
  # Attempt to determine the rank statically (constant-fold `rank`).
  rank_static = tensor_util.constant_value(rank)
  if rank_static is not None:
    if rank_static.ndim != 0:
      raise ValueError('Rank must be a scalar.')
    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, rank_static):
        # Callers (assert_rank, assert_rank_at_least) pattern-match on this
        # exact first argument to rewrite the error -- keep it stable.
        raise ValueError(
            'Static rank condition failed', x_rank_static, rank_static)
      # Statically verified: no runtime check needed.
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')
  condition = dynamic_condition(array_ops.rank(x), rank)
  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  if rank_static is None:
    this_data = ['Rank must be a scalar. Received rank: ', rank]
    rank_check = assert_rank(rank, 0, data=this_data)
    condition = control_flow_ops.with_dependencies([rank_check], condition)
  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_rank')
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar integer `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
    dynamic_condition = math_ops.equal
    # `name` is reused from here on as the tensor's label in error messages
    # (eager tensors have no graph name).
    if context.in_eager_mode():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message,
          'Tensor %s must have rank' % name, rank, 'Received shape: ',
          array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # _assert_rank_condition signals a static failure as
      # ('Static rank condition failed', actual_rank, given_rank); rewrite it
      # into a user-facing message. e.args[2] is the required rank,
      # e.args[1] the actual one.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
            (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
    return assert_op
@tf_export('assert_rank_at_least')
def assert_rank_at_least(
    x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank` or higher.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(
      name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
    dynamic_condition = math_ops.greater_equal
    # `name` is reused from here on as the tensor's label in error messages
    # (eager tensors have no graph name).
    if context.in_eager_mode():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message,
          'Tensor %s must have rank at least' % name, rank,
          'Received shape: ', array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # Static failure is signalled as ('Static rank condition failed',
      # actual_rank, given_rank); rewrite it into a user-facing message.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank at least %d. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
    return assert_op
def _static_rank_in(actual_rank, given_ranks):
return actual_rank in given_ranks
def _dynamic_rank_in(actual_rank, given_ranks):
  """Graph-level test: boolean tensor, `actual_rank` equals any given rank."""
  if not given_ranks:
    # Empty candidate set can never match.
    return ops.convert_to_tensor(False)
  matches = [math_ops.equal(candidate, actual_rank)
             for candidate in given_ranks]
  combined = matches[0]
  for match in matches[1:]:
    combined = math_ops.logical_or(combined, match)
  return combined
def _assert_ranks_condition(
    x, ranks, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.
  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar int32 `Tensor` objects to test `x`'s rank
      against.
    static_condition: A python function that takes
      `[actual_rank, given_ranks]` and returns `True` if the condition is
      satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_ranks]
      and return `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  for rank in ranks:
    assert_type(rank, dtypes.int32)
  # Attempt to resolve every rank at graph-construction time.
  ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
  if not any(r is None for r in ranks_static):
    # All candidate ranks are statically known: validate that each is a
    # scalar, then try to decide the whole condition without adding ops.
    for rank_static in ranks_static:
      if rank_static.ndim != 0:
        raise ValueError('Rank must be a scalar.')
    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, ranks_static):
        # Callers match on this exact first args element to re-raise with a
        # friendlier message; args[1] is the actual rank, args[2] the givens.
        raise ValueError(
            'Static rank condition failed', x_rank_static, ranks_static)
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')
  condition = dynamic_condition(array_ops.rank(x), ranks)
  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  for rank, rank_static in zip(ranks, ranks_static):
    if rank_static is None:
      this_data = ['Rank must be a scalar. Received rank: ', rank]
      rank_check = assert_rank(rank, 0, data=this_data)
      condition = control_flow_ops.with_dependencies([rank_check], condition)
  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('assert_rank_in')
def assert_rank_in(
    x, ranks, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank in `ranks`.
  Example of adding a dependency to an operation:
  ```python
  with tf.control_dependencies([tf.assert_rank_in(x, (2, 4))]):
    output = tf.reduce_sum(x)
  ```
  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar `Tensor` objects.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_in".
  Returns:
    Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
    If static checks determine `x` has matching rank, a `no_op` is returned.
  Raises:
    ValueError: If static checks determine `x` has mismatched rank.
  """
  with ops.name_scope(
      name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
    message = message or ''
    # Eager tensors have no graph name, so use an empty placeholder instead.
    if context.in_eager_mode():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message, 'Tensor %s must have rank in' % name
      ] + list(ranks) + [
          'Received shape: ', array_ops.shape(x)
      ]
    try:
      assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
                                          _dynamic_rank_in, data, summarize)
    except ValueError as e:
      # _assert_ranks_condition signals a statically-detected failure with
      # args ('Static rank condition failed', actual_rank, given_ranks);
      # rewrap it in a human-readable message. Anything else propagates.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank in %s. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
    return assert_op
@tf_export('assert_integer')
def assert_integer(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  This check is fully static: if `x` has a non-quantized integer dtype a
  `no_op` is returned, otherwise a `TypeError` is raised right away.

  Example of adding a dependency to an operation:
  ```python
  with tf.control_dependencies([tf.assert_integer(x)]):
    output = tf.reduce_sum(x)
  ```
  Args:
    x: `Tensor` whose basetype is integer and is not quantized.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".
  Raises:
    TypeError: If `x.dtype` is anything other than non-quantized integer.
  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  with ops.name_scope(name, 'assert_integer', [x]):
    x = ops.convert_to_tensor(x, name='x')
    if x.dtype.is_integer:
      return control_flow_ops.no_op('statically_determined_was_integer')
    # Eager tensors have no graph name; describe them generically.
    tensor_name = 'tensor' if context.in_eager_mode() else x.name
    raise TypeError(
        '%s Expected "x" to be integer type. Found: %s of dtype %s'
        % (message or '', tensor_name, x.dtype))
@tf_export('assert_type')
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A tensorflow `Tensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name to give this `Op`. Defaults to "assert_type"
  Raises:
    TypeError: If the tensors data type doesn't match `tf_type`.
  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  with ops.name_scope(name, 'assert_type', [tensor]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype == tf_type:
      return control_flow_ops.no_op('statically_determined_correct_type')
    prefix = message or ''
    # Graph tensors carry a name worth reporting; eager tensors do not.
    if context.in_graph_mode():
      raise TypeError(
          '%s %s must be of type %s' % (prefix, tensor.name, tf_type))
    raise TypeError(
        '%s tensor must be of type %s' % (prefix, tf_type))
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
  """Gets the difference x[1:] - x[:-1] of the flattened input."""
  x = array_ops.reshape(x, [-1])
  if not is_numeric_tensor(x):
    raise TypeError('Expected x to be numeric, instead found: %s' % x)

  too_short = math_ops.less(array_ops.size(x), 2)
  tail_len = array_ops.shape(x) - 1

  def _empty_result():
    # Fewer than two elements: nothing to compare, so return [].
    return ops.convert_to_tensor([], dtype=x.dtype)

  def _pairwise_diff():
    # Two or more elements: x[1:] - x[:-1] via strided slices.
    return (array_ops.strided_slice(x, [1], [1] + tail_len)
            - array_ops.strided_slice(x, [0], tail_len))

  return control_flow_ops.cond(too_short, _empty_result, _pairwise_diff)
@tf_export('is_numeric_tensor')
def is_numeric_tensor(tensor):
  """Returns `True` iff `tensor` is an `ops.Tensor` with a numeric dtype."""
  if not isinstance(tensor, ops.Tensor):
    return False
  return tensor.dtype in NUMERIC_TYPES
@tf_export('is_non_decreasing')
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements are compared in row-major order: `x` is non-decreasing iff
  `x[i] <= x[i+1]` for every adjacent pair. A tensor with fewer than two
  elements is trivially non-decreasing.
  See also: `is_strictly_increasing`
  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional). Defaults to "is_non_decreasing"
  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.
  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    diffs = _get_diff_for_monotonic_comparison(x)
    # With a single element diffs is empty and reduce_all([]) yields True.
    zero = ops.convert_to_tensor(0, dtype=diffs.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, diffs))
@tf_export('is_strictly_increasing')
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements are compared in row-major order: `x` is strictly increasing iff
  `x[i] < x[i+1]` for every adjacent pair. A tensor with fewer than two
  elements is trivially strictly increasing.
  See also: `is_non_decreasing`
  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"
  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.
  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    diffs = _get_diff_for_monotonic_comparison(x)
    # With a single element diffs is empty and reduce_all([]) yields True.
    zero = ops.convert_to_tensor(0, dtype=diffs.dtype)
    return math_ops.reduce_all(math_ops.less(zero, diffs))
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
@tf_export('assert_same_float_dtype')
def assert_same_float_dtype(tensors=None, dtype=None):
  """Validate and return float type based on `tensors` and `dtype`.

  For ops such as matrix multiplication, inputs and weights must be of the
  same float type. This function validates that all `tensors` are the same type,
  validates that type is `dtype` (if supplied), and returns the type. Type must
  be a floating point type. If neither `tensors` nor `dtype` is supplied,
  the function will return `dtypes.float32`.
  Args:
    tensors: Tensors of input values. Can include `None` elements, which will be
        ignored.
    dtype: Expected type.
  Returns:
    Validated type.
  Raises:
    ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
      float, or the common type of the inputs is not a floating point type.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  if dtype:
    if not dtype.is_floating:
      raise ValueError('Expected floating point type, got %s.' % dtype)
    return dtype
  # Nothing constrained the type: fall back to the default float type.
  return dtypes.float32
@tf_export('assert_scalar')
def assert_scalar(tensor, name=None):
  """Statically asserts that `tensor` has rank 0 and returns it unchanged.

  Args:
    tensor: A `Tensor` (or convertible value) expected to be a scalar.
    name: A name for this operation (optional). Defaults to "assert_scalar".
  Returns:
    The input, converted to a `Tensor`.
  Raises:
    ValueError: If the tensor's static shape is not rank 0.
  """
  with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
    tensor = ops.convert_to_tensor(tensor, name=name_scope)
    shape = tensor.get_shape()
    if shape.ndims == 0:
      return tensor
    # Eager tensors have no graph name to report.
    if context.in_eager_mode():
      raise ValueError('Expected scalar shape, saw shape: %s.'
                       % (shape,))
    raise ValueError('Expected scalar shape for %s, saw shape: %s.'
                     % (tensor.name, shape))
| |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main, TestCase
from json import loads, dumps
from tornado.web import HTTPError
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
from qiita_db.handlers.plugin import _get_plugin, _get_command
import qiita_db as qdb
class UtilTests(TestCase):
    """Tests for the module-level plugin/command lookup helpers."""

    def test_get_plugin(self):
        """_get_plugin returns the matching Software or raises HTTPError."""
        observed = _get_plugin("QIIME", "1.9.1")
        self.assertEqual(observed, qdb.software.Software(1))
        # A plugin name that does not exist raises a 404-style error
        with self.assertRaises(HTTPError):
            _get_plugin("QiIME", "1.9.1")

    def test_get_command(self):
        """_get_command returns the matching Command or raises HTTPError."""
        observed = _get_command('QIIME', '1.9.1', 'Split libraries FASTQ')
        self.assertEqual(observed, qdb.software.Command(1))
        # A command name that does not exist raises a 404-style error
        with self.assertRaises(HTTPError):
            _get_command('QIIME', '1.9.1', 'UNKNOWN')
class PluginHandlerTests(OauthTestingBase):
    """Integration tests for GET /qiita_db/plugins/<name>/<version>/."""
    def test_get_plugin_does_not_exist(self):
        # Version 1.9.0 is not registered, so the handler returns 404.
        obs = self.get('/qiita_db/plugins/QIIME/1.9.0/', headers=self.header)
        self.assertEqual(obs.code, 404)
    def test_get_no_header(self):
        # Missing OAuth header is rejected with 400.
        obs = self.get('/qiita_db/plugins/QIIME/1.9.0/')
        self.assertEqual(obs.code, 400)
    def test_get(self):
        # A registered plugin returns its full JSON description.
        obs = self.get('/qiita_db/plugins/QIIME/1.9.1/', headers=self.header)
        self.assertEqual(obs.code, 200)
        exp = {
            'name': 'QIIME',
            'version': '1.9.1',
            'description': 'Quantitative Insights Into Microbial Ecology '
                           '(QIIME) is an open-source bioinformatics pipeline '
                           'for performing microbiome analysis from raw DNA '
                           'sequencing data',
            'commands': ['Split libraries FASTQ', 'Split libraries',
                         'Pick closed-reference OTUs', 'Summarize Taxa',
                         'Beta Diversity', 'Alpha Rarefaction',
                         'Single Rarefaction'],
            'publications': [{'DOI': '10.1038/nmeth.f.303',
                              'PubMed': '20383131'}],
            'default_workflows': ['FASTQ upstream workflow',
                                  'FASTA upstream workflow',
                                  'Per sample FASTQ upstream workflow'],
            'type': 'artifact transformation',
            'active': False}
        self.assertEqual(loads(obs.body), exp)
class CommandListHandlerTests(OauthTestingBase):
    """Integration tests for POST /qiita_db/plugins/<name>/<version>/commands/."""
    def test_post(self):
        # Create a regular (non-analysis) command.
        data = {
            'name': 'New Command',
            'description': 'Command added for testing',
            'required_parameters': dumps(
                {'in_data': ['artifact:["FASTA"]', None]}),
            'optional_parameters': dumps(
                {'param1': ['string', ''],
                 'param2': ['float', '1.5'],
                 'param3': ['boolean', 'True'],
                 'param4': ['mchoice:["opt1", "opt2", "opt3"]',
                            dumps(['opt1', 'opt2'])]}),
            'outputs': dumps({'out1': 'BIOM'}),
            'default_parameter_sets': dumps(
                {'dflt1': {'param1': 'test',
                           'param2': '2.4',
                           'param3': 'False'}})
            }
        obs = self.post('/qiita_db/plugins/QIIME/1.9.1/commands/', data=data,
                        headers=self.header)
        self.assertEqual(obs.code, 200)
        # The command is now retrievable and not flagged analysis-only.
        obs = _get_command('QIIME', '1.9.1', 'New Command')
        self.assertEqual(obs.name, 'New Command')
        self.assertFalse(obs.analysis_only)
        # Create a new command that is analysis only
        data = {
            'name': 'New analysis command',
            'description': 'Analysis command added for testing',
            'required_parameters': dumps(
                {'in_data': ['artifact:["BIOM"]', None]}),
            'optional_parameters': dumps(
                {'param1': ['string', 'default'],
                 'param4': ['mchoice:["opt1", "opt2", "opt3"]',
                            dumps(['opt1', 'opt2']), None, True]}),
            'outputs': dumps({'outtable': 'BIOM'}),
            'default_parameter_sets': dumps({'dflt1': {'param1': 'test'}}),
            'analysis_only': True
            }
        obs = self.post('/qiita_db/plugins/QIIME/1.9.1/commands/', data=data,
                        headers=self.header)
        self.assertEqual(obs.code, 200)
        # The analysis-only flag and merging scheme must round-trip.
        obs = _get_command('QIIME', '1.9.1', 'New analysis command')
        self.assertEqual(obs.name, 'New analysis command')
        self.assertTrue(obs.analysis_only)
        self.assertEqual(obs.merging_scheme,
                         {'parameters': ['param4'], 'outputs': []})
class CommandHandlerTests(OauthTestingBase):
    """Integration tests for GET /qiita_db/plugins/<name>/<version>/commands/<cmd>/."""
    def test_get_command_does_not_exist(self):
        # An unknown command name returns 404.
        obs = self.get('/qiita_db/plugins/QIIME/1.9.1/commands/UNKNOWN/',
                       headers=self.header)
        self.assertEqual(obs.code, 404)
    def test_get_no_header(self):
        # Missing OAuth header is rejected with 400.
        obs = self.get(
            '/qiita_db/plugins/QIIME/1.9.1/commands/Split%20libraries/')
        self.assertEqual(obs.code, 400)
    def test_get(self):
        # A known command returns its full parameter specification.
        obs = self.get(
            '/qiita_db/plugins/QIIME/1.9.1/commands/Split%20libraries/',
            headers=self.header)
        self.assertEqual(obs.code, 200)
        exp = {'name': 'Split libraries',
               'description': 'Demultiplexes and applies quality control to '
                              'FASTA data',
               'required_parameters': {
                   'input_data': ['artifact', ['FASTA', 'FASTA_Sanger',
                                               'SFF']]},
               'optional_parameters': {
                   'barcode_type': ['string', 'golay_12'],
                   'disable_bc_correction': ['bool', 'False'],
                   'disable_primers': ['bool', 'False'],
                   'max_ambig': ['integer', '6'],
                   'max_barcode_errors': ['float', '1.5'],
                   'max_homopolymer': ['integer', '6'],
                   'max_primer_mismatch': ['integer', '0'],
                   'max_seq_len': ['integer', '1000'],
                   'min_qual_score': ['integer', '25'],
                   'min_seq_len': ['integer', '200'],
                   'qual_score_window': ['integer', '0'],
                   'reverse_primer_mismatches': ['integer', '0'],
                   'reverse_primers': ['choice:["disable", "truncate_only", '
                                       '"truncate_remove"]', 'disable'],
                   'trim_seq_length': ['bool', 'False'],
                   'truncate_ambi_bases': ['bool', 'False']},
               'default_parameter_sets': {
                   'Defaults with Golay 12 barcodes': {
                       'reverse_primers': 'disable',
                       'reverse_primer_mismatches': 0,
                       'disable_bc_correction': False,
                       'max_barcode_errors': 1.5,
                       'disable_primers': False,
                       'min_seq_len': 200,
                       'truncate_ambi_bases': False,
                       'max_ambig': 6,
                       'min_qual_score': 25,
                       'trim_seq_length': False,
                       'max_seq_len': 1000,
                       'max_primer_mismatch': 0,
                       'max_homopolymer': 6,
                       'qual_score_window': 0,
                       'barcode_type': 'golay_12'},
                   'Defaults with Hamming 8 barcodes': {
                       'reverse_primers': 'disable',
                       'reverse_primer_mismatches': 0,
                       'disable_bc_correction': False,
                       'max_barcode_errors': 1.5,
                       'disable_primers': False,
                       'min_seq_len': 200,
                       'truncate_ambi_bases': False,
                       'max_ambig': 6,
                       'min_qual_score': 25,
                       'trim_seq_length': False,
                       'max_seq_len': 1000,
                       'max_primer_mismatch': 0,
                       'max_homopolymer': 6,
                       'qual_score_window': 0,
                       'barcode_type': 'hamming_8'}}}
        self.assertEqual(loads(obs.body), exp)
class CommandActivateHandlerTests(OauthTestingBase):
    """Tests for the command activation endpoint."""

    def test_post_command_does_not_exist(self):
        """Activating an unknown command returns a 404."""
        response = self.post('/qiita_db/plugins/QIIME/1.9.1/commands/'
                             'UNKNOWN/activate/',
                             headers=self.header, data={})
        self.assertEqual(response.code, 404)

    def test_post_no_header(self):
        """Requests without the OAuth header are rejected with a 400."""
        response = self.post('/qiita_db/plugins/QIIME/1.9.1/commands/'
                             'Split%20libraries/activate/', data={})
        self.assertEqual(response.code, 400)

    def test_post(self):
        """A valid request flips the command's active flag to True."""
        qdb.software.Software.deactivate_all()
        self.assertFalse(qdb.software.Command(2).active)
        response = self.post('/qiita_db/plugins/QIIME/1.9.1/commands/'
                             'Split%20libraries/activate/',
                             headers=self.header, data={})
        self.assertEqual(response.code, 200)
        self.assertTrue(qdb.software.Command(2).active)
class ReloadPluginAPItestHandlerTests(OauthTestingBase):
    """Tests for the plugin-reload API test endpoint."""

    def test_post_no_header(self):
        """Requests without the OAuth header are rejected with a 400."""
        response = self.post('/apitest/reload_plugins/', data={})
        self.assertEqual(response.code, 400)

    def test_post(self):
        """An authenticated request reloads the plugins successfully."""
        response = self.post('/apitest/reload_plugins/', headers=self.header,
                             data={})
        self.assertEqual(response.code, 200)
# Allow running this test module directly with `python <module>`.
if __name__ == '__main__':
    main()
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from collections import abc
import contextlib
import itertools
import logging
import re
from alembic.migration import MigrationContext
from alembic.operations import Operations
import debtcollector.removals
from oslo_utils import timeutils
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import Connectable
from sqlalchemy.engine import url as sa_url
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import cast
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql import text
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from oslo_db._i18n import _
from oslo_db import exception
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import ndb
# NOTE(ochuprykov): Add references for backwards compatibility
InvalidSortKey = exception.InvalidSortKey
ColumnError = exception.ColumnError
# Module-level logger.
LOG = logging.getLogger(__name__)
# Matches "scheme://user:password@host..." so credentials can be masked
# (group 1 = username, group 2 = password).
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
# Accepted suffixed sort directions, e.g. "asc-nullslast"; plain
# "asc"/"desc" are also accepted by paginate_query via str.partition.
_VALID_SORT_DIR = [
    "-".join(x) for x in itertools.product(["asc", "desc"],
                                           ["nullsfirst", "nullslast"])]
def sanitize_db_url(url):
    """Return *url* with its username and password masked as ``****``."""
    credentials = _DBURL_REGEX.match(url)
    if credentials is None:
        # No user:password section present; nothing to hide.
        return url
    return '%s****:****%s' % (url[:credentials.start(1)],
                              url[credentials.end(2):])
def get_unique_keys(model):
    """Get a list of sets of unique model keys.
    :param model: the ORM model class
    :rtype: list of sets of strings
    :return: unique model keys or None if unable to find them
    """
    try:
        mapper = inspect(model)
    except exc.NoInspectionAvailable:
        # Not a mapped ORM class; uniqueness cannot be determined.
        return None
    else:
        local_table = mapper.local_table
        base_table = mapper.base_mapper.local_table
    if local_table is None:
        return None
    # extract result from cache if present
    has_info = hasattr(local_table, 'info')
    if has_info:
        info = local_table.info
        if 'oslodb_unique_keys' in info:
            return info['oslodb_unique_keys']
    res = []
    try:
        constraints = base_table.constraints
    except AttributeError:
        constraints = []
    for constraint in constraints:
        # Only unique and primary-key constraints imply uniqueness;
        # CheckConstraints and foreign keys are skipped.
        if isinstance(constraint, (sqlalchemy.UniqueConstraint,
                                   sqlalchemy.PrimaryKeyConstraint)):
            res.append({c.name for c in constraint.columns})
    try:
        indexes = base_table.indexes
    except AttributeError:
        indexes = []
    for index in indexes:
        # Unique indexes also guarantee uniqueness of their column set.
        if index.unique:
            res.append({c.name for c in index.columns})
    # cache result for next calls with the same model
    if has_info:
        info['oslodb_unique_keys'] = res
    return res
def _stable_sorting_order(model, sort_keys):
    """Check whether the sorting order is stable.

    :return: True if it is stable, False if it's not, None if it's impossible
             to determine.
    """
    unique_key_sets = get_unique_keys(model)
    if unique_key_sets is None:
        # No inspection data available for the model.
        return None
    requested = set(sort_keys)
    # The order is stable when some unique key is fully covered by sort_keys.
    return any(keys.issubset(requested) for keys in unique_key_sets)
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
                   sort_dir=None, sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.
    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
    We also have to cope with different sort_directions and cases where k2,
    k3, ... are nullable.
    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.
    The "offset" parameter is intentionally avoided. As offset requires a
    full scan through the preceding results each time, criteria-based
    pagination is preferred. See http://use-the-index-luke.com/no-offset
    for further background.
    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
                     suffix -nullsfirst, -nullslast can be added to define
                     the ordering of null values
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if _stable_sorting_order(model, sort_keys) is False:
        LOG.warning('Unique keys not in sort_keys. '
                    'The sorting order may be unstable.')
    if sort_dir and sort_dirs:
        raise AssertionError('Disallow set sort_dir and '
                             'sort_dirs at the same time.')
    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'
    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]
    if len(sort_dirs) != len(sort_keys):
        raise AssertionError('sort_dirs and sort_keys must have same length.')
    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            # Raises KeyError when the key is not a mapped attribute.
            inspect(model).all_orm_descriptors[current_sort_key]
        except KeyError:
            raise exception.InvalidSortKey(current_sort_key)
        else:
            sort_key_attr = getattr(model, current_sort_key)
        try:
            # Split e.g. "asc-nullslast" into "asc" and "nullslast";
            # a plain "asc"/"desc" yields an empty null_sort_dir.
            main_sort_dir, __, null_sort_dir = current_sort_dir.partition("-")
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[main_sort_dir]
            null_order_by_stmt = {
                "": None,
                "nullsfirst": sort_key_attr.is_(None),
                "nullslast": sort_key_attr.isnot(None),
            }[null_sort_dir]
        except KeyError:
            raise ValueError(_("Unknown sort direction, "
                               "must be one of: %s") %
                             ", ".join(_VALID_SORT_DIR))
        if null_order_by_stmt is not None:
            # Emulate NULLS FIRST/LAST by ordering on the IS (NOT) NULL
            # predicate before ordering on the column itself.
            query = query.order_by(sqlalchemy.desc(null_order_by_stmt))
        query = query.order_by(sort_dir_func(sort_key_attr))
    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)
        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            # NOTE: We skip the marker value comparison if marker_values[i] is
            #       None, for two reasons: 1) the comparison operators below
            #       ('<', '>') are not applicable on None value; 2) this is
            #       safe because we can assume the primary key is included in
            #       sort_key, thus checked as (one of) marker values.
            if marker_values[i] is not None:
                for j in range(i):
                    model_attr = getattr(model, sort_keys[j])
                    if marker_values[j] is not None:
                        crit_attrs.append((model_attr == marker_values[j]))
                model_attr = getattr(model, sort_keys[i])
                val = marker_values[i]
                # sqlalchemy doesn't like booleans in < >. bug/1656947
                if isinstance(model_attr.type, Boolean):
                    val = int(val)
                    model_attr = cast(model_attr, Integer)
                if sort_dirs[i].startswith('desc'):
                    crit_attr = (model_attr < val)
                    if sort_dirs[i].endswith('nullsfirst'):
                        crit_attr = sqlalchemy.sql.or_(crit_attr,
                                                       model_attr.is_(None))
                else:
                    crit_attr = (model_attr > val)
                    if sort_dirs[i].endswith('nullslast'):
                        crit_attr = sqlalchemy.sql.or_(crit_attr,
                                                       model_attr.is_(None))
                crit_attrs.append(crit_attr)
            criteria = sqlalchemy.sql.and_(*crit_attrs)
            criteria_list.append(criteria)
        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)
    if limit is not None:
        query = query.limit(limit)
    return query
def to_list(x, default=None):
    """Coerce *x* into a list.

    ``None`` maps to *default*; a list is returned unchanged; a string or a
    non-iterable becomes a one-element list; any other iterable is
    materialized into a new list.
    """
    if x is None:
        return default
    if isinstance(x, list):
        return x
    if isinstance(x, str) or not isinstance(x, abc.Iterable):
        return [x]
    return list(x)
def _read_deleted_filter(query, db_model, deleted):
    """Filter *query* on the soft-delete column of *db_model*.

    When *deleted* is truthy only soft-deleted rows are kept, otherwise only
    live rows (those whose ``deleted`` column equals its default) are kept.
    """
    if 'deleted' not in db_model.__table__.columns:
        raise ValueError(_("There is no `deleted` column in `%s` table. "
                           "Project doesn't use soft-deleted feature.")
                         % db_model.__name__)
    default_deleted_value = db_model.__table__.c.deleted.default.arg
    if deleted:
        # Soft-deleted rows have a `deleted` value different from the default.
        return query.filter(db_model.deleted != default_deleted_value)
    return query.filter(db_model.deleted == default_deleted_value)
def _project_filter(query, db_model, project_id):
    """Filter *query* by the ``project_id`` column of *db_model*.

    *project_id* may be a single value or a list/tuple/set of values; an
    iterable becomes an SQL ``IN`` clause.
    """
    if 'project_id' not in db_model.__table__.columns:
        raise ValueError(_("There is no `project_id` column in `%s` table.")
                         % db_model.__name__)
    if isinstance(project_id, (list, tuple, set)):
        return query.filter(db_model.project_id.in_(project_id))
    return query.filter(db_model.project_id == project_id)
def model_query(model, session, args=None, **kwargs):
    """Query helper for db.sqlalchemy api methods.
    This accounts for `deleted` and `project_id` fields.
    :param model: Model to query. Must be a subclass of ModelBase.
    :type model: models.ModelBase
    :param session: The session to use.
    :type session: sqlalchemy.orm.session.Session
    :param args: Arguments to query. If None - model is used.
    :type args: tuple
    Keyword arguments:
    :keyword project_id: If present, allows filtering by project_id(s).
                         Can be either a project_id value, or an iterable of
                         project_id values, or None. If an iterable is passed,
                         only rows whose project_id column value is on the
                         `project_id` list will be returned. If None is passed,
                         only rows which are not bound to any project, will be
                         returned.
    :type project_id: iterable,
                      model.__table__.columns.project_id.type,
                      None type
    :keyword deleted: If present, allows filtering by deleted field.
                      If True is passed, only deleted entries will be
                      returned, if False - only existing entries.
    :type deleted: bool
    Usage:
    .. code-block:: python
      from oslo_db.sqlalchemy import utils
      def get_instance_by_uuid(uuid):
          session = get_session()
          with session.begin():
              return (utils.model_query(models.Instance, session=session)
                           .filter(models.Instance.uuid == uuid)
                           .first())
      def get_nodes_stat():
          data = (Node.id, Node.cpu, Node.ram, Node.hdd)
          session = get_session()
          with session.begin():
              return utils.model_query(Node, session=session, args=data).all()
    Also you can create your own helper, based on ``utils.model_query()``.
    For example, it can be useful if you plan to use ``project_id`` and
    ``deleted`` parameters from project's ``context``
    .. code-block:: python
      from oslo_db.sqlalchemy import utils
      def _model_query(context, model, session=None, args=None,
                       project_id=None, project_only=False,
                       read_deleted=None):
          # We suppose, that functions ``_get_project_id()`` and
          # ``_get_deleted()`` should handle passed parameters and
          # context object (for example, decide, if we need to restrict a user
          # to query his own entries by project_id or only allow admin to read
          # deleted entries). For return values, we expect to get
          # ``project_id`` and ``deleted``, which are suitable for the
          # ``model_query()`` signature.
          kwargs = {}
          if project_id is not None:
              kwargs['project_id'] = _get_project_id(context, project_id,
                                                     project_only)
          if read_deleted is not None:
              kwargs['deleted'] = _get_deleted_dict(context, read_deleted)
          session = session or get_session()
          with session.begin():
              return utils.model_query(model, session=session,
                                       args=args, **kwargs)
      def get_instance_by_uuid(context, uuid):
          return (_model_query(context, models.Instance, read_deleted='yes')
                  .filter(models.Instance.uuid == uuid)
                  .first())
      def get_nodes_data(context, project_id, project_only='allow_none'):
          data = (Node.id, Node.cpu, Node.ram, Node.hdd)
          return (_model_query(context, Node, args=data, project_id=project_id,
                               project_only=project_only)
                  .all())
    """
    if not issubclass(model, models.ModelBase):
        raise TypeError(_("model should be a subclass of ModelBase"))
    # Query the whole model unless specific columns/expressions were given.
    query = session.query(model) if not args else session.query(*args)
    if 'deleted' in kwargs:
        query = _read_deleted_filter(query, model, kwargs['deleted'])
    if 'project_id' in kwargs:
        query = _project_filter(query, model, kwargs['project_id'])
    return query
def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.

    .. warning::

       Do not use this method when creating ForeignKeys in database
       migrations because sqlalchemy needs the same MetaData object to hold
       information about the parent table and the reference table in the
       ForeignKey. This method uses a unique MetaData object per table
       object so it won't work with ForeignKey creation.
    """
    # Each call reflects the table into a fresh MetaData instance.
    return Table(name, MetaData(), autoload_with=engine)
def _get_not_supported_column(col_name_col_instance, column_name):
    """Return the caller-supplied Column for a type SQLite cannot represent.

    :param col_name_col_instance: mapping of column name to a replacement
        ``sqlalchemy.Column`` for columns whose type SQLite does not support
    :param column_name: name of the column to look up
    :returns: the ``sqlalchemy.Column`` registered for *column_name*
    :raises ColumnError: if *column_name* is missing from the mapping or the
        mapped value is not a ``sqlalchemy.Column`` instance
    """
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because column has unsupported "
                "type by SQLite.")
        raise exception.ColumnError(msg % column_name)
    if not isinstance(column, Column):
        # Fixed: the original message ran two sentences together
        # ("... for column %s It should be instance of ...").
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an "
                "instance of sqlalchemy.Column.")
        raise exception.ColumnError(msg % column_name)
    return column
def drop_old_duplicate_entries_from_table(engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.

    :param engine: Sqlalchemy engine
    :param table_name: Table with duplicates
    :param use_soft_delete: If True - values will be marked as `deleted`,
                            if False - values will be removed from table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    table = Table(table_name, meta, autoload_with=engine)
    # Group rows by the unique-constraint columns; only groups with more
    # than one row are duplicates, and MAX(id) identifies the row to keep.
    columns_for_group_by = [table.c[name] for name in uc_column_names]
    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)
    duplicated_rows_select = sqlalchemy.sql.select(
        *columns_for_select,
    ).group_by(
        *columns_for_group_by
    ).having(
        func.count(table.c.id) > 1
    )
    with engine.connect() as conn, conn.begin():
        for row in conn.execute(duplicated_rows_select).fetchall():
            # NOTE(boris-42): Do not remove row that has the biggest ID.
            delete_condition = table.c.id != row[0]
            is_none = None  # workaround for pyflakes
            # Only touch rows that are not already soft-deleted.
            delete_condition &= table.c.deleted_at == is_none
            for name in uc_column_names:
                delete_condition &= table.c[name] == row._mapping[name]
            # Log each row that is about to be removed / soft-deleted.
            rows_to_delete_select = sqlalchemy.sql.select(
                table.c.id,
            ).where(delete_condition)
            for row in conn.execute(rows_to_delete_select).fetchall():
                LOG.info(
                    "Deleting duplicated row with id: %(id)s from table: "
                    "%(table)s", dict(id=row[0], table=table_name))
            # Soft delete sets `deleted` to the row's own id so the unique
            # constraint still holds among soft-deleted rows.
            if use_soft_delete:
                delete_statement = table.update().\
                    where(delete_condition).\
                    values({
                        'deleted': literal_column('id'),
                        'updated_at': literal_column('updated_at'),
                        'deleted_at': timeutils.utcnow()
                    })
            else:
                delete_statement = table.delete().where(delete_condition)
            conn.execute(delete_statement)
def _get_default_deleted_value(table):
    """Return the "not deleted" sentinel matching the table's id type.

    :raises exception.ColumnError: for id column types other than
        Integer or String
    """
    id_type = table.c.id.type
    if isinstance(id_type, Integer):
        return 0
    if isinstance(id_type, String):
        return ""
    raise exception.ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(engine, table_name, indexes):
    """Recreate indexes that cover the `deleted` column.

    After the `deleted` column has been dropped and re-added with a new
    type, indexes that covered it are stale; drop any same-named index that
    still exists and rebuild it from the freshly reflected table.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param indexes: index dicts (as returned by get_indexes) captured
        before the `deleted` column was rebuilt
    """
    table = get_table(engine, table_name)
    real_indexes = get_indexes(engine, table_name)
    existing_index_names = dict(
        [(index['name'], index['column_names']) for index in real_indexes])
    # NOTE(boris-42): Restore indexes on `deleted` column
    for index in indexes:
        if 'deleted' not in index['column_names']:
            continue
        name = index['name']
        # Drop the surviving index first so the recreate cannot collide.
        if name in existing_index_names:
            column_names = [table.c[c] for c in existing_index_names[name]]
            old_index = Index(name, *column_names, unique=index["unique"])
            old_index.drop(engine)
        column_names = [table.c[c] for c in index['column_names']]
        new_index = Index(index["name"], *column_names, unique=index["unique"])
        new_index.create(engine)
@debtcollector.removals.remove(
    message='This API is intended for use with sqlalchemy-migrate, support '
    'for which is deprecated for removal; it will be removed in a future '
    'release',
    version='10.1.0',
)
def change_deleted_column_type_to_boolean(engine, table_name,
                                          **col_name_col_instance):
    """Convert a table's id-typed `deleted` column to Boolean.

    Rows whose `deleted` equals their `id` are treated as soft-deleted and
    become ``deleted=True``; the old column is dropped and the new one is
    renamed into place, then `deleted` indexes are restored.

    :param engine: sqlalchemy engine
    :param table_name: table whose `deleted` column is converted
    :param col_name_col_instance: replacement Column objects for columns
        whose type SQLite cannot reflect (used only on the sqlite path)
    """
    if engine.name == "sqlite":
        # SQLite cannot ALTER column types in place; use the copy-table path.
        return _change_deleted_column_type_to_boolean_sqlite(
            engine, table_name, **col_name_col_instance)
    indexes = get_indexes(engine, table_name)
    table = get_table(engine, table_name)
    old_deleted = Column('old_deleted', Boolean, default=False)
    # NOTE: Column.create/drop/alter are sqlalchemy-migrate extensions that
    # need the MetaData bound to an engine; always unbind in a finally so the
    # shared MetaData is not left attached to this engine.
    table.metadata.bind = engine
    try:
        old_deleted.create(table, populate_default=False)
    finally:
        table.metadata.bind = None
    with engine.connect() as conn, conn.begin():
        conn.execute(
            table.update().where(
                table.c.deleted == table.c.id
            ).values(old_deleted=True)
        )
    table.metadata.bind = engine
    try:
        table.c.deleted.drop()
        table.c.old_deleted.alter(name="deleted")
    finally:
        table.metadata.bind = None
    _restore_indexes_on_deleted_columns(engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(engine, table_name,
                                                  **col_name_col_instance):
    """SQLite variant: rebuild the table with a Boolean `deleted` column.

    SQLite cannot alter column types in place, so a ``<table>__tmp__`` copy
    is created with the new column definition, the old table is dropped, the
    copy is renamed into place, and soft-deleted rows (deleted == id) are
    rewritten as ``deleted=True``.
    """
    table = get_table(engine, table_name)
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # SQLite reflection yields NullType for unsupported types;
                # the caller must supply a replacement Column for those.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                # FIXME(stephenfin): We shouldn't be using this private API;
                # figure out how else to copy an arbitrary column schema
                column_copy = column._copy()
        else:
            column_copy = Column('deleted', Boolean, default=0)
        columns.append(column_copy)
    # FIXME(stephenfin): We shouldn't be using this private API;
    # figure out how else to copy an arbitrary column schema
    constraints = [constraint._copy() for constraint in table.constraints]
    with engine.connect() as conn:
        meta = table.metadata
        new_table = Table(
            table_name + "__tmp__", meta,
            *(columns + constraints))
        with conn.begin():
            new_table.create(conn)
        # Capture index definitions before the old table disappears.
        indexes = []
        for index in get_indexes(engine, table_name):
            column_names = [new_table.c[c] for c in index['column_names']]
            indexes.append(
                Index(index["name"], *column_names, unique=index["unique"])
            )
        c_select = []
        for c in table.c:
            if c.name != "deleted":
                c_select.append(c)
            else:
                c_select.append(table.c.deleted == table.c.id)
        with conn.begin():
            table.drop(conn)
            for index in indexes:
                index.create(conn)
        # Table.rename is a sqlalchemy-migrate extension and needs a bound
        # MetaData; unbind in a finally to avoid leaking the engine binding.
        table.metadata.bind = engine
        try:
            new_table.rename(table_name)
        finally:
            table.metadata.bind = None
        with conn.begin():
            conn.execute(
                new_table.update().where(
                    new_table.c.deleted == new_table.c.id
                ).values(deleted=True)
            )
@debtcollector.removals.remove(
    message='This API is intended for use with sqlalchemy-migrate, support '
    'for which is deprecated for removal; it will be removed in a future '
    'release',
    version='10.1.0',
)
def change_deleted_column_type_to_id_type(engine, table_name,
                                          **col_name_col_instance):
    """Convert a table's Boolean `deleted` column to the type of its `id`.

    Soft-deleted rows (``deleted == True``) get `deleted` set to their own
    `id` so the column can participate in unique constraints; other rows
    keep the type's default "not deleted" value (0 or "").

    :param engine: sqlalchemy engine
    :param table_name: table whose `deleted` column is converted
    :param col_name_col_instance: replacement Column objects for columns
        whose type SQLite cannot reflect (used only on the sqlite path)
    """
    if engine.name == "sqlite":
        # SQLite cannot ALTER column types in place; use the copy-table path.
        return _change_deleted_column_type_to_id_type_sqlite(
            engine, table_name, **col_name_col_instance)
    indexes = get_indexes(engine, table_name)
    table = get_table(engine, table_name)
    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    # NOTE: Column.create/drop/alter are sqlalchemy-migrate extensions that
    # need the MetaData bound to an engine; always unbind in a finally.
    table.metadata.bind = engine
    try:
        new_deleted.create(table, populate_default=True)
    finally:
        table.metadata.bind = None
    table.metadata.bind = engine
    try:
        with engine.connect() as conn, conn.begin():
            deleted = True  # workaround for pyflakes
            conn.execute(
                table.update().where(
                    table.c.deleted == deleted
                ).values(new_deleted=table.c.id)
            )
        table.c.deleted.drop()
        table.c.new_deleted.alter(name="deleted")
        _restore_indexes_on_deleted_columns(engine, table_name, indexes)
    finally:
        table.metadata.bind = None
def _is_deleted_column_constraint(constraint):
    """Return True if *constraint* is the boolean CHECK on `deleted`.

    NOTE(boris-42): There is no other way to check is CheckConstraint
    associated with deleted column.
    """
    # Only CHECK constraints can be the boolean `deleted` guard at all.
    if not isinstance(constraint, CheckConstraint):
        return False
    # NOTE(zzzeek): SQLite never reflected CHECK contraints here
    # in any case until version 1.1. Safe to assume that any CHECK
    # that's talking about the value of "deleted in (something)" is
    # the boolean constraint we're looking to get rid of.
    return bool(
        re.match(r".*deleted in \(.*\)", str(constraint.sqltext), re.I))
def _change_deleted_column_type_to_id_type_sqlite(engine, table_name,
                                                  **col_name_col_instance):
    """SQLite variant: rebuild the table with an id-typed `deleted` column."""
    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
    # constraints in sqlite DB and our `deleted` column has two check
    # constraints. There is only one way to remove these constraints:
    #
    # 1) Create new table with the same columns, constraints and indexes.
    #    (except deleted column).
    # 2) Copy all data from old to new table.
    # 3) Drop old table.
    # 4) Rename new table to old table name.
    meta = MetaData()
    table = Table(table_name, meta, autoload_with=engine)
    default_deleted_value = _get_default_deleted_value(table)
    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                # SQLite reflection yields NullType for unsupported types;
                # the caller must supply a replacement Column for those.
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                # FIXME(stephenfin): We shouldn't be using this private API;
                # figure out how else to copy an arbitrary column schema
                column_copy = column._copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)
    constraints = []
    for constraint in table.constraints:
        # Skip the boolean CHECK constraints on `deleted` — they would
        # reject id-typed values in the rebuilt table.
        if not _is_deleted_column_constraint(constraint):
            # FIXME(stephenfin): We shouldn't be using this private API;
            # figure out how else to copy an arbitrary constraint schema
            constraints.append(constraint._copy())
    with engine.connect() as conn:
        # we need separate transactions, since we must create the table before
        # we can copy entries into it (later)
        with conn.begin():
            new_table = Table(
                table_name + "__tmp__", meta,
                *(columns + constraints))
            new_table.create(conn)
        indexes = []
        for index in get_indexes(engine, table_name):
            column_names = [new_table.c[c] for c in index['column_names']]
            indexes.append(
                Index(index["name"], *column_names, unique=index["unique"])
            )
        with conn.begin():
            table.drop(conn)
            for index in indexes:
                index.create(conn)
        with conn.begin():
            # Table.rename is a sqlalchemy-migrate extension and needs a
            # bound MetaData; unbind in a finally.
            new_table.metadata.bind = engine
            try:
                new_table.rename(table_name)
            finally:
                new_table.metadata.bind = None
            deleted = True  # workaround for pyflakes
            conn.execute(
                new_table.update().where(
                    new_table.c.deleted == deleted
                ).values(deleted=new_table.c.id)
            )
            # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
            deleted = False  # workaround for pyflakes
            conn.execute(
                new_table.update().where(
                    new_table.c.deleted == deleted
                ).values(deleted=default_deleted_value)
            )
def get_db_connection_info(conn_pieces):
    """Extract connection details from a parsed database URL.

    :param conn_pieces: a parsed URL (e.g. a ``urlparse`` result) whose
        ``netloc`` has the form ``user[:password]@host`` and whose path is
        the database name.
    :returns: tuple of ``(user, password, database, host)``; password is
        the empty string when the URL carries none.
    """
    database = conn_pieces.path.strip('/')
    netloc_parts = conn_pieces.netloc.split('@')
    host = netloc_parts[1]
    auth_parts = netloc_parts[0].split(':')
    user = auth_parts[0]
    password = auth_parts[1].strip() if len(auth_parts) > 1 else ""
    return (user, password, database, host)
def get_indexes(engine, table_name):
    """Return the reflected index dicts for a table.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    """
    return sqlalchemy.inspect(engine).get_indexes(table_name)
def index_exists(engine, table_name, index_name):
    """Report whether the table has an index with the given name.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param index_name: name of the index
    """
    return any(
        index['name'] == index_name
        for index in get_indexes(engine, table_name)
    )
def index_exists_on_columns(engine, table_name, columns):
    """Report whether some index covers exactly the given column list.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param columns: a list type of columns that will be checked
    """
    # Normalize to a list so comparison against reflected names works.
    wanted = columns if isinstance(columns, list) else list(columns)
    return any(
        index['column_names'] == wanted
        for index in get_indexes(engine, table_name)
    )
def add_index(engine, table_name, index_name, idx_columns):
    """Create an index over the given columns.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param index_name: name of the index
    :param idx_columns: tuple with names of columns that will be indexed
    :raises ValueError: if an index with that name already exists
    """
    table = get_table(engine, table_name)
    if index_exists(engine, table_name, index_name):
        raise ValueError("Index '%s' already exists!" % index_name)
    columns = [getattr(table.c, col) for col in idx_columns]
    Index(index_name, *columns).create(engine)
def drop_index(engine, table_name, index_name):
    """Drop index with given name.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param index_name: name of the index
    :raises ValueError: if the table has no index with that name
    """
    table = get_table(engine, table_name)
    target = next(
        (index for index in table.indexes if index.name == index_name), None)
    if target is None:
        raise ValueError("Index '%s' not found!" % index_name)
    target.drop(engine)
def change_index_columns(engine, table_name, index_name, new_columns):
    """Rebuild the named index over a new set of columns.

    Implemented as a drop followed by a create; propagates the ValueError
    from drop_index when no such index exists.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param index_name: name of the index
    :param new_columns: tuple with names of columns that will be indexed
    """
    drop_index(engine, table_name, index_name)
    add_index(engine, table_name, index_name, new_columns)
def column_exists(engine, table_name, column):
    """Report whether the reflected table has the given column.

    :param engine: sqlalchemy engine
    :param table_name: name of the table
    :param column: name of the column
    """
    return column in get_table(engine, table_name).c
class DialectFunctionDispatcher(object):
    """Base for dispatching a call to per-database/per-driver functions.

    Use :meth:`dispatch_for_dialect` to create a dispatcher; see its
    docstring for the full usage description.
    """
    @classmethod
    def dispatch_for_dialect(cls, expr, multiple=False):
        """Provide dialect-specific functionality within distinct functions.

        e.g.::

            @dispatch_for_dialect("*")
            def set_special_option(engine):
                pass

            @set_special_option.dispatch_for("sqlite")
            def set_sqlite_special_option(engine):
                return engine.execute("sqlite thing")

            @set_special_option.dispatch_for("mysql+mysqldb")
            def set_mysqldb_special_option(engine):
                return engine.execute("mysqldb thing")

        After the above registration, the ``set_special_option()`` function
        is now a dispatcher, given a SQLAlchemy ``Engine``, ``Connection``,
        URL string, or ``sqlalchemy.engine.URL`` object::

            eng = create_engine('...')
            result = set_special_option(eng)

        The filter system supports two modes, "multiple" and "single".
        The default is "single", and requires that one and only one function
        match for a given backend. In this mode, the function may also
        have a return value, which will be returned by the top level
        call.

        "multiple" mode, on the other hand, does not support return
        arguments, but allows for any number of matching functions, where
        each function will be called::

            # the initial call sets this up as a "multiple" dispatcher
            @dispatch_for_dialect("*", multiple=True)
            def set_options(engine):
                # set options that apply to *all* engines

            @set_options.dispatch_for("postgresql")
            def set_postgresql_options(engine):
                # set options that apply to all Postgresql engines

            @set_options.dispatch_for("postgresql+psycopg2")
            def set_postgresql_psycopg2_options(engine):
                # set options that apply only to "postgresql+psycopg2"

            @set_options.dispatch_for("*+pyodbc")
            def set_pyodbc_options(engine):
                # set options that apply to all pyodbc backends

        Note that in both modes, any number of additional arguments can be
        accepted by member functions. For example, to populate a dictionary
        of options, it may be passed in::

            @dispatch_for_dialect("*", multiple=True)
            def set_engine_options(url, opts):
                pass

            @set_engine_options.dispatch_for("mysql+mysqldb")
            def _mysql_set_default_charset_to_utf8(url, opts):
                opts.setdefault('charset', 'utf-8')

            @set_engine_options.dispatch_for("sqlite")
            def _set_sqlite_in_memory_check_same_thread(url, opts):
                if url.database in (None, 'memory'):
                    opts['check_same_thread'] = False

            opts = {}
            set_engine_options(url, opts)

        The driver specifiers are of the form:
        ``<database | *>[+<driver | *>]``. That is, database name or "*",
        followed by an optional ``+`` sign with driver or "*". Omitting
        the driver name implies all drivers for that database.
        """
        if multiple:
            cls = DialectMultiFunctionDispatcher
        else:
            cls = DialectSingleFunctionDispatcher
        return cls().dispatch_for(expr)
    # Splits "database[+driver]" into its two parts; driver is optional.
    _db_plus_driver_reg = re.compile(r'([^+]+?)(?:\+(.+))?$')
    def dispatch_for(self, expr):
        # Decorator: register the decorated function under *expr* and return
        # the dispatcher itself, so the decorated name becomes the
        # dispatcher. Re-decorating the dispatcher re-registers the last
        # plain function seen (`_last`).
        def decorate(fn):
            dbname, driver = self._parse_dispatch(expr)
            if fn is self:
                fn = fn._last
            self._last = fn
            self._register(expr, dbname, driver, fn)
            return self
        return decorate
    def _parse_dispatch(self, text):
        # Split an expression into (dbname, driver), defaulting each to '*'.
        m = self._db_plus_driver_reg.match(text)
        if not m:
            raise ValueError("Couldn't parse database[+driver]: %r" % text)
        return m.group(1) or '*', m.group(2) or '*'
    def __call__(self, *arg, **kw):
        # Dispatch based on the first positional argument (engine / URL /
        # URL string); all arguments are forwarded to the matched function.
        target = arg[0]
        return self._dispatch_on(
            self._url_from_target(target), target, arg, kw)
    def _url_from_target(self, target):
        # Normalize an Engine/Connection, plain string, or URL into a URL.
        if isinstance(target, Connectable):
            return target.engine.url
        elif isinstance(target, str):
            if "://" not in target:
                target_url = sa_url.make_url("%s://" % target)
            else:
                target_url = sa_url.make_url(target)
            return target_url
        elif isinstance(target, sa_url.URL):
            return target
        else:
            raise ValueError("Invalid target type: %r" % target)
    def dispatch_on_drivername(self, drivername):
        """Return a sub-dispatcher for the given drivername.

        This provides a means of calling a different function, such as the
        "*" function, for a given target object that normally refers
        to a sub-function.
        """
        dbname, driver = self._db_plus_driver_reg.match(drivername).group(1, 2)
        def go(*arg, **kw):
            return self._dispatch_on_db_driver(dbname, "*", arg, kw)
        return go
    def _dispatch_on(self, url, target, arg, kw):
        # Resolve database name and driver from the URL, then dispatch.
        dbname, driver = self._db_plus_driver_reg.match(
            url.drivername).group(1, 2)
        if not driver:
            # URL gave no explicit driver; ask the dialect for its default.
            driver = url.get_dialect().driver
        return self._dispatch_on_db_driver(dbname, driver, arg, kw)
    def _invoke_fn(self, fn, arg, kw):
        # Hook point for subclasses: actually call a matched function.
        return fn(*arg, **kw)
class DialectSingleFunctionDispatcher(DialectFunctionDispatcher):
    """Dispatcher that allows exactly one matching function per backend."""
    def __init__(self):
        # dbname -> {driver -> fn}
        self.reg = collections.defaultdict(dict)
    def _register(self, expr, dbname, driver, fn):
        per_driver = self.reg[dbname]
        if driver in per_driver:
            raise TypeError("Multiple functions for expression %r" % expr)
        per_driver[driver] = fn
    def _matches(self, dbname, driver):
        # Prefer an exact database match over the wildcard, and within each
        # database an exact driver match over the wildcard.
        for db in (dbname, '*'):
            per_driver = self.reg[db]
            for drv in (driver, '*'):
                if drv in per_driver:
                    return per_driver[drv]
        raise ValueError(
            "No default function found for driver: %r" %
            ("%s+%s" % (dbname, driver)))
    def _dispatch_on_db_driver(self, dbname, driver, arg, kw):
        # Single mode: invoke the one matching function and return its value.
        return self._invoke_fn(self._matches(dbname, driver), arg, kw)
class DialectMultiFunctionDispatcher(DialectFunctionDispatcher):
    """Dispatcher that calls every matching function for a backend."""
    def __init__(self):
        # dbname -> driver -> [fns]
        self.reg = collections.defaultdict(
            lambda: collections.defaultdict(list))
    def _register(self, expr, dbname, driver, fn):
        self.reg[dbname][driver].append(fn)
    def _matches(self, dbname, driver):
        # Yield exact-driver matches before wildcard matches, for both the
        # exact database name and the wildcard database.
        drivers = ('*', ) if driver == '*' else (driver, '*')
        for db in (dbname, '*'):
            per_driver = self.reg[db]
            for drv in drivers:
                yield from per_driver[drv]
    def _dispatch_on_db_driver(self, dbname, driver, arg, kw):
        # Multiple mode: call every match; return values are disallowed.
        for fn in self._matches(dbname, driver):
            if self._invoke_fn(fn, arg, kw) is not None:
                raise TypeError(
                    "Return value not allowed for "
                    "multiple filtered function")
# Public alias: the usual entry point for creating a dialect dispatcher.
dispatch_for_dialect = DialectFunctionDispatcher.dispatch_for_dialect
def get_non_innodb_tables(connectable, skip_tables=('migrate_version',
                                                    'alembic_version')):
    """Get a list of tables which don't use InnoDB storage engine.

    :param connectable: a SQLAlchemy Engine or a Connection instance
    :param skip_tables: a list of tables which might have a different
                        storage engine
    """
    query_str = """
        SELECT table_name
        FROM information_schema.tables
        WHERE table_schema = :database AND
              engine != 'InnoDB'
    """
    params = {}
    if skip_tables:
        params = {
            'skip_%s' % i: table_name
            for i, table_name in enumerate(skip_tables)
        }
        placeholders = ', '.join(':' + name for name in params)
        query_str += ' AND table_name NOT IN (%s)' % placeholders
    params['database'] = connectable.engine.url.database
    query = text(query_str)
    # TODO(stephenfin): What about if this is already a Connection?
    with connectable.connect() as conn, conn.begin():
        return [row[0] for row in conn.execute(query, params)]
def get_non_ndbcluster_tables(connectable, skip_tables=None):
    """Get a list of tables which don't use MySQL Cluster (NDB) storage engine.

    :param connectable: a SQLAlchemy Engine or Connection instance
    :param skip_tables: a list of tables which might have a different
                        storage engine
    """
    query_str = """
        SELECT table_name
        FROM information_schema.tables
        WHERE table_schema = :database AND
              engine != 'ndbcluster'
    """
    params = {}
    if skip_tables:
        params = dict(
            ('skip_%s' % i, table_name)
            for i, table_name in enumerate(skip_tables)
        )
        placeholders = ', '.join(':' + p for p in params)
        query_str += ' AND table_name NOT IN (%s)' % placeholders
    params['database'] = connectable.engine.url.database
    query = text(query_str)
    # NOTE: previously this called ``connectable.execute(query, **params)``;
    # ``Engine.execute()`` and kwargs-style bind parameters were removed in
    # SQLAlchemy 2.0. Execute on a Connection with a parameter dict instead,
    # matching get_non_innodb_tables(), and consume the result before the
    # connection closes.
    # TODO(stephenfin): What about if this is already a Connection?
    with connectable.connect() as conn, conn.begin():
        nonndbcluster = conn.execute(query, params)
        return [i[0] for i in nonndbcluster]
def get_foreign_key_constraint_name(engine, table_name, column_name):
    """Find the name of foreign key in a table, given constrained column name.

    :param engine: a SQLAlchemy engine (or connection)
    :param table_name: name of table which contains the constraint
    :param column_name: name of column that is constrained by the foreign key.
    :return: the name of the first foreign key constraint which constrains
        the given column in the given table, or None when no such
        constraint exists.
    """
    inspector = inspect(engine)
    for foreign_key in inspector.get_foreign_keys(table_name):
        if column_name in foreign_key['constrained_columns']:
            return foreign_key['name']
@contextlib.contextmanager
def suspend_fk_constraints_for_col_alter(
        engine, table_name, column_name, referents=[]):
    """Detect foreign key constraints, drop, and recreate.

    This is used to guard against a column ALTER that on some backends
    cannot proceed unless foreign key constraints are not present.

    e.g.::

        from oslo_db.sqlalchemy.util import (
            suspend_fk_constraints_for_col_alter
        )

        with suspend_fk_constraints_for_col_alter(
            migrate_engine, "user_table",
            referents=[
                "local_user", "nonlocal_user", "project"
            ]):
            user_table.c.domain_id.alter(nullable=False)

    :param engine: a SQLAlchemy engine (or connection)

    :param table_name: target table name. All foreign key constraints
     that refer to the table_name / column_name will be dropped and recreated.

    :param column_name: target column name. all foreign key constraints
     which refer to this column, either partially or fully, will be dropped
     and recreated.

    :param referents: sequence of string table names to search for foreign
     key constraints. A future version of this function may no longer
     require this argument, however for the moment it is required.

    """
    # NOTE(review): the mutable default ``referents=[]`` is benign here —
    # the sequence is only iterated, never mutated.
    if (
        not ndb.ndb_status(engine)
    ):
        # Non-NDB backends can ALTER the column with FK constraints in
        # place, so there is nothing to suspend.
        yield
    else:
        with engine.connect() as conn:
            insp = inspect(conn)
            # Collect every FK in the referent tables that points at
            # table_name.column_name; remember its source table so it can be
            # recreated afterwards.
            fks = []
            for ref_table_name in referents:
                for fk in insp.get_foreign_keys(ref_table_name):
                    if not fk.get('name'):
                        raise AssertionError("foreign key hasn't a name.")
                    if fk['referred_table'] == table_name and \
                            column_name in fk['referred_columns']:
                        fk['source_table'] = ref_table_name
                        if 'options' not in fk:
                            fk['options'] = {}
                        fks.append(fk)
            ctx = MigrationContext.configure(conn)
            op = Operations(ctx)
            with conn.begin():
                for fk in fks:
                    op.drop_constraint(
                        fk['name'], fk['source_table'], type_="foreignkey")
            # Caller performs the column ALTER while the constraints are
            # dropped; they are rebuilt (with their original options) after.
            yield
            with conn.begin():
                for fk in fks:
                    op.create_foreign_key(
                        fk['name'], fk['source_table'],
                        fk['referred_table'],
                        fk['constrained_columns'],
                        fk['referred_columns'],
                        onupdate=fk['options'].get('onupdate'),
                        ondelete=fk['options'].get('ondelete'),
                        deferrable=fk['options'].get('deferrable'),
                        initially=fk['options'].get('initially'),
                    )
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import gen_nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  # Inputs of Conv2DBackpropInput are (input_sizes, filter, out_backprop);
  # input_sizes is a shape vector, so its gradient is None.
  return [None,
          nn_ops.conv2d_backprop_filter(
              grad, array_ops.shape(op.inputs[1]), op.inputs[2],
              op.get_attr("strides"), op.get_attr("padding"),
              op.get_attr("use_cudnn_on_gpu"), op.get_attr("data_format")),
          nn_ops.conv2d(
              grad, op.inputs[1], op.get_attr("strides"),
              op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
              op.get_attr("data_format"))]
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  We assume that probs is of shape [batch_size * dim]
  The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
  This matrix is diagonal minus a rank one matrix, so it is easy to implement
  as follows:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  Args:
     op: the Softmax op.
     grad_softmax:  the tensor representing the gradient w.r.t. the
       softmax output.

  Returns:
     gradient w.r.t the input to the softmax

  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time? Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  # Per-row inner product <grad_softmax, softmax>, reshaped to a column so
  # it broadcasts across the feature dimension.
  inner_sum = math_ops.reduce_sum(grad_softmax * softmax, [1])
  grad_x = (grad_softmax - array_ops.reshape(inner_sum, [-1, 1])) * softmax
  return grad_x
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # NOTE(review): presumably graphs created before the data_format attr
    # existed raise ValueError here — fall back to the default layout.
    data_format = None
  return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
                                                  data_format=data_format))
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor. The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  # Sum over all axes except the last one (the bias/channel axis).
  reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
  return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
  """Gradient for Relu, gated on the op's output."""
  return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
  """Gradient for Relu6; note it is evaluated against the op's input."""
  return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
  """Gradient for Elu, computed from the op's output."""
  return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
  """Gradient for Softplus, computed from the op's input."""
  return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
  """Gradient for Softsign, computed from the op's input."""
  return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
  """Second-order gradient for Relu.

  ReluGrad's inputs are (gradients, features); the incoming grad is gated
  on the features, and the features input itself gets a zero gradient.
  """
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x),
          array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Expanding vec to [D0, 1] lets ordinary broadcasting do the rest.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for softmax cross-entropy: scale the stored per-row grads."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for sparse softmax cross-entropy; mirrors the dense version."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
  """Gradients for Conv2D w.r.t. its input and its filter."""
  return [nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]),
                                       op.inputs[1], grad,
                                       op.get_attr("strides"),
                                       op.get_attr("padding"),
                                       op.get_attr("use_cudnn_on_gpu"),
                                       op.get_attr("data_format")),
          nn_ops.conv2d_backprop_filter(op.inputs[0],
                                        array_ops.shape(op.inputs[1]), grad,
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format"))]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
  """Gradients for DepthwiseConv2dNative w.r.t. its input and its filter."""
  return [
      nn_ops.depthwise_conv2d_native_backprop_input(
          array_ops.shape(op.inputs[0]), op.inputs[1], grad,
          op.get_attr("strides"), op.get_attr("padding")),
      nn_ops.depthwise_conv2d_native_backprop_filter(
          op.inputs[0], array_ops.shape(op.inputs[1]), grad,
          op.get_attr("strides"), op.get_attr("padding"))
  ]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
  """Gradient for local response normalization, using the op's attrs."""
  depth_radius = op.get_attr("depth_radius")
  bias = op.get_attr("bias")
  alpha = op.get_attr("alpha")
  beta = op.get_attr("beta")
  return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
                               depth_radius, bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
  """Gradient for AvgPool; only needs the input's shape, not its values."""
  return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]), grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   op.get_attr("padding"),
                                   data_format=op.get_attr("data_format")
                                  )
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
  """Gradient for MaxPool; needs both input and output to locate the maxima."""
  return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format")
                                  )
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor.  The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  # inputs[3] (beta) is not forwarded: the other gradients don't depend on it.
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad).
  """
  return op.inputs[0] * grad
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import logging
import traceback
import json
import subprocess
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import stack_tools
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from resource_management.core import shell
from resource_management.core.resources import Execute
from resource_management.core import global_lock
from resource_management.core.exceptions import Fail
from resource_management.libraries.script.script import Script
# Alert label templates (str.format); {0} = app state, {1} = check duration in seconds.
OK_MESSAGE = "The application reported a '{0}' state in {1:.3f}s"
MESSAGE_WITH_STATE_AND_INSTANCES = "The application reported a '{0}' state in {1:.3f}s. [Live: {2}, Desired: {3}]"
CRITICAL_MESSAGE_WITH_STATE = "The application reported a '{0}' state. Check took {1:.3f}s"
CRITICAL_MESSAGE = "Application information could not be retrieved"

# results codes
CRITICAL_RESULT_CODE = 'CRITICAL'
OK_RESULT_CODE = 'OK'
# NOTE(review): name is misspelled ("UKNOWN"); kept as-is because it is
# referenced throughout this module -- renaming would touch every use site.
UKNOWN_STATUS_CODE = 'UNKNOWN'

# Alert-framework configuration tokens ({{site/property}}) and the fallback
# defaults used when a token is absent from the supplied configurations.
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'

HIVE_PRINCIPAL_KEY = '{{hive-interactive-site/hive.llap.daemon.service.principal}}'
HIVE_PRINCIPAL_DEFAULT = 'default.hive.principal'
HIVE_PRINCIPAL_KEYTAB_KEY = '{{hive-interactive-site/hive.llap.daemon.keytab.file}}'
HIVE_PRINCIPAL_KEYTAB_DEFAULT = 'default.hive.keytab'
HIVE_AUTHENTICATION_DEFAULT = 'NOSASL'

HIVE_USER_KEY = '{{hive-env/hive_user}}'
HIVE_USER_DEFAULT = 'default.smoke.user'

STACK_NAME = '{{cluster-env/stack_name}}'
STACK_ROOT = '{{cluster-env/stack_root}}'
STACK_ROOT_DEFAULT = Script.get_stack_root()

LLAP_APP_NAME_KEY = '{{hive-interactive-env/llap_app_name}}'
LLAP_APP_NAME_DEFAULT = 'llap0'

# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'

# Script parameter naming the maximum runtime (seconds) of the status command.
CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
CHECK_COMMAND_TIMEOUT_DEFAULT = 120.0

# Mapping of LLAP app states to 'user friendly' state names.
llap_app_state_dict = {'RUNNING_ALL': 'RUNNING',
                       'RUNNING_PARTIAL': 'RUNNING',
                       'COMPLETE': 'NOT RUNNING',
                       'LAUNCHING': 'LAUNCHING',
                       'APP_NOT_FOUND': 'APP NOT FOUND'}

logger = logging.getLogger('ambari_alerts')
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def get_tokens():
  """
  Returns a tuple of tokens in the format {{site/property}} that will be used
  to build the dictionary passed into execute
  """
  required_tokens = (
    SECURITY_ENABLED_KEY,
    KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
    HIVE_PRINCIPAL_KEY,
    HIVE_PRINCIPAL_KEYTAB_KEY,
    HIVE_USER_KEY,
    STACK_NAME,
    STACK_ROOT,
    LLAP_APP_NAME_KEY,
  )
  return required_tokens
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label.

  Runs `hive --service llapstatus` (kinit-ing first on secure clusters),
  parses its JSON output and maps the reported LLAP application state to an
  alert state.

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running
  """
  # --findAppTimeout value passed to llapstatus (0 = do not wait for the app).
  LLAP_APP_STATUS_CMD_TIMEOUT = 0

  if configurations is None:
    return ('UNKNOWN', ['There were no configurations supplied to the script.'])

  result_code = None

  try:
    security_enabled = False
    if SECURITY_ENABLED_KEY in configurations:
      security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'

    # BUGFIX: the timeout is a script *parameter*, not a configuration token.
    # The original tested membership in `configurations` but indexed
    # `parameters`, so the override either raised KeyError or never applied.
    check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT
    if CHECK_COMMAND_TIMEOUT_KEY in parameters:
      check_command_timeout = int(parameters[CHECK_COMMAND_TIMEOUT_KEY])

    hive_user = HIVE_USER_DEFAULT
    if HIVE_USER_KEY in configurations:
      hive_user = configurations[HIVE_USER_KEY]

    llap_app_name = LLAP_APP_NAME_DEFAULT
    if LLAP_APP_NAME_KEY in configurations:
      llap_app_name = configurations[LLAP_APP_NAME_KEY]

    if security_enabled:
      if HIVE_PRINCIPAL_KEY in configurations:
        llap_principal = configurations[HIVE_PRINCIPAL_KEY]
      else:
        llap_principal = HIVE_PRINCIPAL_DEFAULT
      llap_principal = llap_principal.replace('_HOST', host_name.lower())

      llap_keytab = HIVE_PRINCIPAL_KEYTAB_DEFAULT
      if HIVE_PRINCIPAL_KEYTAB_KEY in configurations:
        llap_keytab = configurations[HIVE_PRINCIPAL_KEYTAB_KEY]

      # Get the configured Kerberos executable search paths, if any
      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
      else:
        kerberos_executable_search_paths = None

      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
      # format() here is resource_management's variant: it interpolates from
      # the local scope, not str.format positional args.
      kinitcmd = format("{kinit_path_local} -kt {llap_keytab} {llap_principal}; ")

      # prevent concurrent kinit
      kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
      kinit_lock.acquire()
      try:
        Execute(kinitcmd, user=hive_user,
                path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
                timeout=10)
      finally:
        kinit_lock.release()

    start_time = time.time()
    if STACK_NAME in configurations and STACK_ROOT in configurations:
      stack_root = stack_tools.get_stack_root(configurations[STACK_NAME],
                                              configurations[STACK_ROOT])

      llap_status_cmd = stack_root + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
    else:
      llap_status_cmd = STACK_ROOT_DEFAULT + format("/current/hive-server2-hive2/bin/hive --service llapstatus --name {llap_app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")

    code, output, error = shell.checked_call(llap_status_cmd, user=hive_user, stderr=subprocess.PIPE,
                                             timeout=check_command_timeout,
                                             logoutput=False)
    # Strip any non-JSON preamble (e.g. MOTD noise) before parsing.
    llap_app_info = make_valid_json(output)

    if llap_app_info is None or 'state' not in llap_app_info:
      # BUGFIX: no exception is in flight here, so traceback.format_exc()
      # produced the useless label "NoneType: None"; report the canned
      # retrieval-failure message instead.
      alert_label = CRITICAL_MESSAGE
      result_code = UKNOWN_STATUS_CODE
      return (result_code, [alert_label])

    retrieved_llap_app_state = llap_app_info['state'].upper()
    if retrieved_llap_app_state in ['RUNNING_ALL']:
      result_code = OK_RESULT_CODE
      total_time = time.time() - start_time
      alert_label = OK_MESSAGE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
    elif retrieved_llap_app_state in ['RUNNING_PARTIAL']:
      live_instances = 0
      desired_instances = 0
      percentInstancesUp = 0
      percent_desired_instances_to_be_up = 80

      # Get 'live' and 'desired' instances
      if 'liveInstances' not in llap_app_info or 'desiredInstances' not in llap_app_info:
        result_code = CRITICAL_RESULT_CODE
        total_time = time.time() - start_time
        alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
        return (result_code, [alert_label])

      live_instances = llap_app_info['liveInstances']
      desired_instances = llap_app_info['desiredInstances']
      if live_instances < 0 or desired_instances <= 0:
        result_code = CRITICAL_RESULT_CODE
        total_time = time.time() - start_time
        alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
        return (result_code, [alert_label])

      percentInstancesUp = float(live_instances) / desired_instances * 100
      if percentInstancesUp >= percent_desired_instances_to_be_up:
        result_code = OK_RESULT_CODE
        total_time = time.time() - start_time
        alert_label = MESSAGE_WITH_STATE_AND_INSTANCES.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state),
                                                              total_time,
                                                              llap_app_info['liveInstances'],
                                                              llap_app_info['desiredInstances'])
      else:
        result_code = CRITICAL_RESULT_CODE
        total_time = time.time() - start_time
        alert_label = MESSAGE_WITH_STATE_AND_INSTANCES.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state),
                                                              total_time,
                                                              llap_app_info['liveInstances'],
                                                              llap_app_info['desiredInstances'])
    else:
      result_code = CRITICAL_RESULT_CODE
      total_time = time.time() - start_time
      alert_label = CRITICAL_MESSAGE_WITH_STATE.format(llap_app_state_dict.get(retrieved_llap_app_state, retrieved_llap_app_state), total_time)
  except:
    # Bare except kept on purpose: an alert script must report, not crash.
    alert_label = traceback.format_exc()
    result_code = UKNOWN_STATUS_CODE

  return (result_code, [alert_label])
"""
Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in
to JSON converter.
"""
def make_valid_json(output):
'''
Note: It is assumed right now that extra lines will be only at the start and not at the end.
Sample expected JSON to be passed for 'loads' is either of the form :
Case 'A':
{
"amInfo" : {
"appName" : "llap0",
"appType" : "org-apache-slider",
"appId" : "APP1",
"containerId" : "container_1466036628595_0010_01_000001",
"hostname" : "hostName",
"amWebUrl" : "http://hostName:port/"
},
"state" : "LAUNCHING",
....
"desiredInstances" : 1,
"liveInstances" : 0,
....
....
}
or
Case 'B':
{
"state" : "APP_NOT_FOUND"
}
'''
splits = output.split("\n")
len_splits = len(splits)
if (len_splits < 3):
raise Fail("Malformed JSON data received from 'llapstatus' command. Exiting ....")
marker_idx = None # To detect where from to start reading for JSON data
for idx, split in enumerate(splits):
curr_elem = split.strip()
if idx + 2 > len_splits:
raise Fail(
"Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.")
next_elem = (splits[(idx + 1)]).strip()
if curr_elem == "{":
if next_elem == "\"amInfo\" : {" and (splits[len_splits - 1]).strip() == '}':
# For Case 'A'
marker_idx = idx
break;
elif idx + 3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
# For Case 'B'
marker_idx = idx
break;
# Remove extra logging from possible JSON output
if marker_idx is None:
raise Fail("Couldn't validate the received output for JSON parsing.")
else:
if marker_idx != 0:
del splits[0:marker_idx]
scanned_output = '\n'.join(splits)
llap_app_info = json.loads(scanned_output)
return llap_app_info
| |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.configsregistrar import ConfigsRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import Config
from st2tests.api import SUPER_SECRET_PARAMETER
from st2tests.base import CleanDbTestCase
from st2tests import fixturesloader
__all__ = ["ConfigsRegistrarTestCase"]

# Absolute paths to the fixture packs exercised by the tests below.
PACK_1_PATH = os.path.join(
    fixturesloader.get_fixtures_packs_base_path(), "dummy_pack_1"
)
PACK_6_PATH = os.path.join(
    fixturesloader.get_fixtures_packs_base_path(), "dummy_pack_6"
)
PACK_19_PATH = os.path.join(
    fixturesloader.get_fixtures_packs_base_path(), "dummy_pack_19"
)
PACK_11_PATH = os.path.join(
    fixturesloader.get_fixtures_packs_base_path(), "dummy_pack_11"
)
PACK_22_PATH = os.path.join(
    fixturesloader.get_fixtures_packs_base_path(), "dummy_pack_22"
)
class ConfigsRegistrarTestCase(CleanDbTestCase):
    """Tests for ConfigsRegistrar pack-config registration and validation.

    Each test starts from an empty database (CleanDbTestCase) and mocks the
    pack loader so that only the fixture pack under test is discovered.
    """

    def test_register_configs_for_all_packs(self):
        """A pack with a valid config registers one Pack and one Config."""
        # Verify DB is empty
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 0)
        self.assertEqual(len(config_dbs), 0)

        registrar = ConfigsRegistrar(use_pack_cache=False)
        registrar._pack_loader.get_packs = mock.Mock()
        registrar._pack_loader.get_packs.return_value = {"dummy_pack_1": PACK_1_PATH}
        packs_base_paths = content_utils.get_packs_base_paths()
        registrar.register_from_packs(base_dirs=packs_base_paths)

        # Verify pack and schema have been registered
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 1)
        self.assertEqual(len(config_dbs), 1)

        config_db = config_dbs[0]
        # Raw Jinja expressions are stored as-is; secret values come back as
        # the fixture's plain value.
        self.assertEqual(config_db.values["api_key"], "{{st2kv.user.api_key}}")
        self.assertEqual(config_db.values["api_secret"], SUPER_SECRET_PARAMETER)
        self.assertEqual(config_db.values["region"], "us-west-1")

    def test_register_all_configs_invalid_config_no_config_schema(self):
        """With validation off, a config lacking a schema still registers."""
        # verify_ configs is on, but ConfigSchema for the pack doesn't exist so
        # validation should proceed normally

        # Verify DB is empty
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 0)
        self.assertEqual(len(config_dbs), 0)

        registrar = ConfigsRegistrar(use_pack_cache=False, validate_configs=False)
        registrar._pack_loader.get_packs = mock.Mock()
        registrar._pack_loader.get_packs.return_value = {"dummy_pack_6": PACK_6_PATH}
        packs_base_paths = content_utils.get_packs_base_paths()
        registrar.register_from_packs(base_dirs=packs_base_paths)

        # Verify pack and schema have been registered
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 1)
        self.assertEqual(len(config_dbs), 1)

    def test_register_all_configs_with_config_schema_validation_validation_failure_1(
        self,
    ):
        """A config value of the wrong type fails schema validation."""
        # Verify DB is empty
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 0)
        self.assertEqual(len(config_dbs), 0)

        registrar = ConfigsRegistrar(
            use_pack_cache=False, fail_on_failure=True, validate_configs=True
        )
        registrar._pack_loader.get_packs = mock.Mock()
        registrar._pack_loader.get_packs.return_value = {"dummy_pack_6": PACK_6_PATH}

        # Register ConfigSchema for pack
        registrar._register_pack_db = mock.Mock()
        # NOTE(review): pack_name is "dummy_pack_5" while the directory is
        # PACK_6_PATH -- looks like a deliberate mismatch inherited from
        # upstream; confirm before "fixing".
        registrar._register_pack(pack_name="dummy_pack_5", pack_dir=PACK_6_PATH)
        packs_base_paths = content_utils.get_packs_base_paths()

        # jsonschema renders type names differently on py2 vs py3.
        if six.PY3:
            expected_msg = (
                'Failed validating attribute "regions" in config for pack '
                "\"dummy_pack_6\" (.*?): 1000 is not of type 'array'"
            )
        else:
            expected_msg = (
                'Failed validating attribute "regions" in config for pack '
                "\"dummy_pack_6\" (.*?): 1000 is not of type u'array'"
            )
        # assertRaisesRegexp is the py2-compatible spelling (deprecated alias
        # of assertRaisesRegex on py3).
        self.assertRaisesRegexp(
            ValueError,
            expected_msg,
            registrar.register_from_packs,
            base_dirs=packs_base_paths,
        )

    def test_register_all_configs_with_config_schema_validation_validation_failure_2(
        self,
    ):
        """A nested config attribute of the wrong type fails validation."""
        # Verify DB is empty
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 0)
        self.assertEqual(len(config_dbs), 0)

        registrar = ConfigsRegistrar(
            use_pack_cache=False, fail_on_failure=True, validate_configs=True
        )
        registrar._pack_loader.get_packs = mock.Mock()
        registrar._pack_loader.get_packs.return_value = {"dummy_pack_19": PACK_19_PATH}

        # Register ConfigSchema for pack
        registrar._register_pack_db = mock.Mock()
        registrar._register_pack(pack_name="dummy_pack_19", pack_dir=PACK_19_PATH)
        packs_base_paths = content_utils.get_packs_base_paths()

        if six.PY3:
            expected_msg = (
                'Failed validating attribute "instances.0.alias" in config for pack '
                "\"dummy_pack_19\" (.*?): {'not': 'string'} is not of type "
                "'string'"
            )
        else:
            expected_msg = (
                'Failed validating attribute "instances.0.alias" in config for pack '
                "\"dummy_pack_19\" (.*?): {'not': 'string'} is not of type "
                "u'string'"
            )
        self.assertRaisesRegexp(
            ValueError,
            expected_msg,
            registrar.register_from_packs,
            base_dirs=packs_base_paths,
        )

    def test_register_all_configs_with_config_schema_validation_validation_failure_3(
        self,
    ):
        """Explicit "decrypt_kv" on a secret-marked config value is rejected."""
        # This test checks for values containing "decrypt_kv" jinja filter in the config
        # object where keys have "secret: True" set in the schema.

        # Verify DB is empty
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 0)
        self.assertEqual(len(config_dbs), 0)

        registrar = ConfigsRegistrar(
            use_pack_cache=False, fail_on_failure=True, validate_configs=True
        )
        registrar._pack_loader.get_packs = mock.Mock()
        registrar._pack_loader.get_packs.return_value = {"dummy_pack_11": PACK_11_PATH}

        # Register ConfigSchema for pack
        registrar._register_pack_db = mock.Mock()
        registrar._register_pack(pack_name="dummy_pack_11", pack_dir=PACK_11_PATH)
        packs_base_paths = content_utils.get_packs_base_paths()

        expected_msg = (
            'Values specified as "secret: True" in config schema are automatically '
            'decrypted by default. Use of "decrypt_kv" jinja filter is not allowed '
            "for such values. Please check the specified values in the config or "
            "the default values in the schema."
        )
        self.assertRaisesRegexp(
            ValueError,
            expected_msg,
            registrar.register_from_packs,
            base_dirs=packs_base_paths,
        )

    def test_register_all_configs_with_config_schema_validation_validation_failure_4(
        self,
    ):
        """"decrypt_kv" in a schema *default* for a secret key is rejected."""
        # This test checks for default values containing "decrypt_kv" jinja filter for
        # keys which have "secret: True" set.

        # Verify DB is empty
        pack_dbs = Pack.get_all()
        config_dbs = Config.get_all()

        self.assertEqual(len(pack_dbs), 0)
        self.assertEqual(len(config_dbs), 0)

        registrar = ConfigsRegistrar(
            use_pack_cache=False, fail_on_failure=True, validate_configs=True
        )
        registrar._pack_loader.get_packs = mock.Mock()
        registrar._pack_loader.get_packs.return_value = {"dummy_pack_22": PACK_22_PATH}

        # Register ConfigSchema for pack
        registrar._register_pack_db = mock.Mock()
        registrar._register_pack(pack_name="dummy_pack_22", pack_dir=PACK_22_PATH)
        packs_base_paths = content_utils.get_packs_base_paths()

        expected_msg = (
            'Values specified as "secret: True" in config schema are automatically '
            'decrypted by default. Use of "decrypt_kv" jinja filter is not allowed '
            "for such values. Please check the specified values in the config or "
            "the default values in the schema."
        )
        self.assertRaisesRegexp(
            ValueError,
            expected_msg,
            registrar.register_from_packs,
            base_dirs=packs_base_paths,
        )
| |
import logging
from flask import flash
from flask.ext.admin import form
from flask.ext.admin._compat import string_types
from flask.ext.admin.babel import gettext, ngettext, lazy_gettext
from flask.ext.admin.model import BaseModelView
from peewee import PrimaryKeyField, ForeignKeyField, Field, CharField, TextField
from wtfpeewee.orm import model_form
from flask.ext.admin.actions import action
from flask.ext.admin.contrib.peeweemodel import filters
from .form import CustomModelConverter, InlineModelConverter, save_inline
from .tools import get_primary_key, parse_like_term
class ModelView(BaseModelView):
    """Flask-Admin model view implementation backed by Peewee models."""

    column_filters = None
    """
    Collection of the column filters.

    Can contain either field names or instances of
    :class:`flask.ext.admin.contrib.peeweemodel.filters.BaseFilter` classes.

    For example::

        class MyModelView(BaseModelView):
            column_filters = ('user', 'email')

    or::

        class MyModelView(BaseModelView):
            column_filters = (BooleanEqualFilter(User.name, 'Name'))
    """

    model_form_converter = CustomModelConverter
    """
    Model form conversion class. Use this to implement custom field conversion logic.

    For example::

        class MyModelConverter(AdminModelConverter):
            pass

        class MyAdminView(ModelView):
            model_form_converter = MyModelConverter
    """

    inline_model_form_converter = InlineModelConverter
    """
    Inline model conversion class. If you need some kind of post-processing for inline
    forms, you can customize behavior by doing something like this::

        class MyInlineModelConverter(AdminModelConverter):
            def post_process(self, form_class, info):
                form_class.value = TextField('value')
                return form_class

        class MyAdminView(ModelView):
            inline_model_form_converter = MyInlineModelConverter
    """

    filter_converter = filters.FilterConverter()
    """
    Field to filter converter.

    Override this attribute to use non-default converter.
    """

    fast_mass_delete = False
    """
    If set to `False` and user deletes more than one model using actions,
    all models will be read from the database and then deleted one by one
    giving Peewee chance to manually cleanup any dependencies (many-to-many
    relationships, etc).

    If set to True, will run DELETE statement which is somewhat faster, but
    might leave corrupted data if you forget to configure DELETE CASCADE
    for your model.
    """

    inline_models = None
    """
    Inline related-model editing for models with parent to child relation.

    Accept enumerable with one of the values:

    1. Child model class::

        class MyModelView(ModelView):
            inline_models = (Post,)

    2. Child model class and additional options::

        class MyModelView(ModelView):
            inline_models = [(Post, dict(form_columns=['title']))]

    3. Django-like ``InlineFormAdmin`` class instance::

        class MyInlineModelForm(InlineFormAdmin):
            form_columns = ('title', 'date')

        class MyModelView(ModelView):
            inline_models = (MyInlineModelForm(MyInlineModel),)

    You can customize generated field name by:

    1. Using `form_name` property as option:

        class MyModelView(ModelView):
            inline_models = ((Post, dict(form_label='Hello')))

    2. Using target model name with `fa_` prefis:

        class Model1(Base):
            # ...
            pass

        class Model2(Base):
            # ...
            pass

        class MyModel1View(Base):
            inline_models = (Model2,)
            column_labels = {'fa_Model2': 'Hello'}
    """

    def __init__(self, model, name=None,
                 category=None, endpoint=None, url=None):
        """Initialize the view and cache the model's primary key name."""
        self._search_fields = []

        super(ModelView, self).__init__(model, name, category, endpoint, url)

        self._primary_key = self.scaffold_pk()

    def _get_model_fields(self, model=None):
        """Return sorted (name, field) pairs for `model` (defaults to self.model)."""
        if model is None:
            model = self.model

        return model._meta.get_sorted_fields()

    def scaffold_pk(self):
        """Return the primary key attribute name of the model."""
        return get_primary_key(self.model)

    def get_pk_value(self, model):
        """Return the primary key value of a model instance."""
        return getattr(model, self._primary_key)

    def scaffold_list_columns(self):
        """Build the default list-view column list from the model's fields."""
        columns = []

        for n, f in self._get_model_fields():
            # Verify type
            field_class = type(f)

            if field_class == ForeignKeyField:
                columns.append(n)
            elif self.column_display_pk or field_class != PrimaryKeyField:
                # Primary keys are hidden unless column_display_pk is set.
                columns.append(n)

        return columns

    def scaffold_sortable_columns(self):
        """Map sortable column names to their Peewee field objects."""
        columns = dict()

        for n, f in self._get_model_fields():
            if self.column_display_pk or type(f) != PrimaryKeyField:
                columns[n] = f

        return columns

    def init_search(self):
        """Collect searchable text fields; return True if search is enabled.

        Raises an Exception for any configured column that is not a plain
        CharField/TextField.
        """
        if self.column_searchable_list:
            for p in self.column_searchable_list:
                if isinstance(p, string_types):
                    p = getattr(self.model, p)

                field_type = type(p)

                # Check type
                # NOTE(review): exact type comparison rejects CharField/TextField
                # subclasses; confirm this is intended before relaxing to
                # isinstance checks.
                if (field_type != CharField and field_type != TextField):
                    raise Exception('Can only search on text columns. ' +
                                    'Failed to setup search for "%s"' % p)

                self._search_fields.append(p)

        return bool(self._search_fields)

    def scaffold_filters(self, name):
        """Build filter instance(s) for a column name or Peewee field."""
        if isinstance(name, string_types):
            attr = getattr(self.model, name, None)
        else:
            attr = name

        if attr is None:
            raise Exception('Failed to find field for filter: %s' % name)

        # Check if field is in different model
        if attr.model_class != self.model:
            visible_name = '%s / %s' % (self.get_column_name(attr.model_class.__name__),
                                        self.get_column_name(attr.name))
        else:
            if not isinstance(name, string_types):
                visible_name = self.get_column_name(attr.name)
            else:
                visible_name = self.get_column_name(name)

        type_name = type(attr).__name__
        flt = self.filter_converter.convert(type_name,
                                            attr,
                                            visible_name)

        return flt

    def is_valid_filter(self, filter):
        """Return True if `filter` is a Peewee filter instance."""
        return isinstance(filter, filters.BasePeeweeFilter)

    def scaffold_form(self):
        """Generate the create/edit form class from the model definition."""
        form_class = model_form(self.model,
                                base_class=form.BaseForm,
                                only=self.form_columns,
                                exclude=self.form_excluded_columns,
                                field_args=self.form_args,
                                converter=self.model_form_converter())

        if self.inline_models:
            form_class = self.scaffold_inline_form_models(form_class)

        return form_class

    def scaffold_inline_form_models(self, form_class):
        """Contribute fields for each configured inline model to the form."""
        converter = self.model_form_converter()
        inline_converter = self.inline_model_form_converter(self)

        for m in self.inline_models:
            form_class = inline_converter.contribute(converter,
                                                     self.model,
                                                     form_class,
                                                     m)

        return form_class

    def _handle_join(self, query, field, joins):
        """Join `field`'s model into `query` once, tracking joined model names."""
        if field.model_class != self.model:
            model_name = field.model_class.__name__

            if model_name not in joins:
                query = query.join(field.model_class)
                joins.add(model_name)

        return query

    def _order_by(self, query, joins, sort_field, sort_desc):
        """Apply ordering by column name or field, joining a related model if needed."""
        if isinstance(sort_field, string_types):
            field = getattr(self.model, sort_field)
            query = query.order_by(field.desc() if sort_desc else field.asc())
        elif isinstance(sort_field, Field):
            if sort_field.model_class != self.model:
                query = self._handle_join(query, sort_field, joins)

            query = query.order_by(sort_field.desc() if sort_desc else sort_field.asc())

        return query, joins

    def get_query(self):
        """Return the base select query; override to pre-filter rows."""
        return self.model.select()

    def get_list(self, page, sort_column, sort_desc, search, filters,
                 execute=True):
        """Return (total_count, rows_or_query) for the list view.

        Applies search terms, filters, sorting and pagination, in that order.
        Each whitespace-separated search term must match at least one
        searchable field (terms are ANDed; fields are ORed per term).
        """
        query = self.get_query()

        joins = set()

        # Search
        if self._search_supported and search:
            values = search.split(' ')

            for value in values:
                if not value:
                    continue

                term = parse_like_term(value)

                stmt = None
                for field in self._search_fields:
                    query = self._handle_join(query, field, joins)

                    # ** is Peewee's case-insensitive LIKE operator.
                    q = field ** term

                    if stmt is None:
                        stmt = q
                    else:
                        stmt |= q

                query = query.where(stmt)

        # Filters
        if self._filters:
            for flt, value in filters:
                f = self._filters[flt]

                query = self._handle_join(query, f.column, joins)
                query = f.apply(query, value)

        # Get count before pagination is applied.
        count = query.count()

        # Apply sorting
        if sort_column is not None:
            sort_field = self._sortable_columns[sort_column]

            query, joins = self._order_by(query, joins, sort_field, sort_desc)
        else:
            order = self._get_default_order()

            if order:
                query, joins = self._order_by(query, joins, order[0], order[1])

        # Pagination
        if page is not None:
            query = query.offset(page * self.page_size)

        query = query.limit(self.page_size)

        if execute:
            query = list(query.execute())

        return count, query

    def get_one(self, id):
        """Fetch a single model instance by primary key value."""
        return self.model.get(**{self._primary_key: id})

    def create_model(self, form):
        """Create a model from form data; flash and return False on failure."""
        try:
            model = self.model()
            form.populate_obj(model)
            self.on_model_change(form, model)
            model.save()

            # For peewee have to save inline forms after model was saved
            save_inline(form, model)
        except Exception as ex:
            flash(gettext('Failed to create model. %(error)s', error=str(ex)), 'error')
            logging.exception('Failed to create model')
            return False
        else:
            self.after_model_change(form, model, True)

        return True

    def update_model(self, form, model):
        """Update an existing model from form data; False on failure."""
        try:
            form.populate_obj(model)
            self.on_model_change(form, model)
            model.save()

            # For peewee have to save inline forms after model was saved
            save_inline(form, model)
        except Exception as ex:
            flash(gettext('Failed to update model. %(error)s', error=str(ex)), 'error')
            logging.exception('Failed to update model')
            return False
        else:
            self.after_model_change(form, model, False)

        return True

    def delete_model(self, model):
        """Delete one model (recursively removing dependents); False on failure."""
        try:
            self.on_model_delete(model)
            model.delete_instance(recursive=True)
            return True
        except Exception as ex:
            flash(gettext('Failed to delete model. %(error)s', error=str(ex)), 'error')
            logging.exception('Failed to delete model')
            return False

    # Default model actions
    def is_action_allowed(self, name):
        """Disallow the bulk 'delete' action when can_delete is False."""
        # Check delete action permission
        if name == 'delete' and not self.can_delete:
            return False

        return super(ModelView, self).is_action_allowed(name)

    @action('delete',
            lazy_gettext('Delete'),
            lazy_gettext('Are you sure you want to delete selected models?'))
    def action_delete(self, ids):
        """Bulk-delete the given primary keys, honoring `fast_mass_delete`."""
        try:
            model_pk = getattr(self.model, self._primary_key)

            if self.fast_mass_delete:
                # Single DELETE statement; skips per-row cleanup.
                count = self.model.delete().where(model_pk << ids).execute()
            else:
                count = 0

                query = self.model.select().filter(model_pk << ids)

                for m in query:
                    m.delete_instance(recursive=True)
                    count += 1

            flash(ngettext('Model was successfully deleted.',
                           '%(count)s models were successfully deleted.',
                           count,
                           count=count))
        except Exception as ex:
            flash(gettext('Failed to delete models. %(error)s', error=str(ex)), 'error')
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class MessageList(ListResource):
def __init__(self, version, service_sid, channel_sid):
    """
    Initialize the MessageList

    :param Version version: Version that contains the resource
    :param service_sid: The SID of the Service that the resource is associated with
    :param channel_sid: The SID of the Channel the Message resource belongs to

    :returns: twilio.rest.chat.v2.service.channel.message.MessageList
    :rtype: twilio.rest.chat.v2.service.channel.message.MessageList
    """
    super(MessageList, self).__init__(version)

    # Path Solution
    self._solution = {
        'service_sid': service_sid,
        'channel_sid': channel_sid,
    }
    self._uri = '/Services/{service_sid}/Channels/{channel_sid}/Messages'.format(
        service_sid=service_sid,
        channel_sid=channel_sid,
    )
def create(self, from_=values.unset, attributes=values.unset,
           date_created=values.unset, date_updated=values.unset,
           last_updated_by=values.unset, body=values.unset,
           media_sid=values.unset, x_twilio_webhook_enabled=values.unset):
    """
    Create the MessageInstance

    :param unicode from_: The Identity of the new message's author
    :param unicode attributes: A valid JSON string that contains application-specific data
    :param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
    :param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
    :param unicode last_updated_by: The Identity of the User who last updated the Message
    :param unicode body: The message to send to the channel
    :param unicode media_sid: The Media Sid to be attached to the new Message
    :param MessageInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

    :returns: The created MessageInstance
    :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
    """
    # NOTE(review): values.of presumably drops `unset` entries so only
    # supplied fields are posted -- confirm against twilio.base.values.
    data = values.of({
        'From': from_,
        'Attributes': attributes,
        'DateCreated': serialize.iso8601_datetime(date_created),
        'DateUpdated': serialize.iso8601_datetime(date_updated),
        'LastUpdatedBy': last_updated_by,
        'Body': body,
        'MediaSid': media_sid,
    })
    # The webhook toggle travels as an HTTP header, not as a form field.
    headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })

    payload = self._version.create(method='POST', uri=self._uri, data=data, headers=headers, )

    return MessageInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        channel_sid=self._solution['channel_sid'],
    )
def stream(self, order=values.unset, limit=None, page_size=None):
    """
    Streams MessageInstance records from the API as a generator stream.
    This operation lazily loads records as efficiently as possible until the limit
    is reached.
    The results are returned as a generator, so this operation is memory efficient.

    :param MessageInstance.OrderType order: The sort order of the returned messages
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.chat.v2.service.channel.message.MessageInstance]
    """
    # Let the version object reconcile limit vs. page_size first.
    read_limits = self._version.read_limits(limit, page_size)

    first_page = self.page(order=order, page_size=read_limits['page_size'])

    return self._version.stream(first_page, read_limits['limit'])
def list(self, order=values.unset, limit=None, page_size=None):
    """
    Lists MessageInstance records from the API as a list.
    Unlike stream(), this operation is eager and will load `limit` records into
    memory before returning.

    :param MessageInstance.OrderType order: The sort order of the returned messages
    :param int limit: Upper limit for the number of records to return. list() guarantees
                      never to return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, list() will attempt to read the limit
                          with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.chat.v2.service.channel.message.MessageInstance]
    """
    # Eagerly drain the lazy stream into a concrete list.
    record_stream = self.stream(order=order, limit=limit, page_size=page_size)
    return list(record_stream)
def page(self, order=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of MessageInstance records from the API.
Request is executed immediately
:param MessageInstance.OrderType order: The sort order of the returned messages
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of MessageInstance
:rtype: twilio.rest.chat.v2.service.channel.message.MessagePage
"""
data = values.of({
'Order': order,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return MessagePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of MessageInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of MessageInstance
:rtype: twilio.rest.chat.v2.service.channel.message.MessagePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return MessagePage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a MessageContext
:param sid: The SID of the Message resource to fetch
:returns: twilio.rest.chat.v2.service.channel.message.MessageContext
:rtype: twilio.rest.chat.v2.service.channel.message.MessageContext
"""
return MessageContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a MessageContext
:param sid: The SID of the Message resource to fetch
:returns: twilio.rest.chat.v2.service.channel.message.MessageContext
:rtype: twilio.rest.chat.v2.service.channel.message.MessageContext
"""
return MessageContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
)
    def __repr__(self):
        """
        Provide a friendly representation of this MessageList.

        :returns: Machine friendly representation
        :rtype: str
        """
        # The list object carries no per-instance identifiers worth printing.
        return '<Twilio.Chat.V2.MessageList>'
class MessagePage(Page):

    def __init__(self, version, response, solution):
        """
        Initialize the MessagePage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path-solution dict holding service_sid and channel_sid

        :returns: twilio.rest.chat.v2.service.channel.message.MessagePage
        :rtype: twilio.rest.chat.v2.service.channel.message.MessagePage
        """
        super(MessagePage, self).__init__(version)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build a MessageInstance from a raw API payload dict.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.chat.v2.service.channel.message.MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
        """
        solution = self._solution
        return MessageInstance(
            self._version,
            payload,
            service_sid=solution['service_sid'],
            channel_sid=solution['channel_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation.

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Chat.V2.MessagePage>'
class MessageContext(InstanceContext):

    def __init__(self, version, service_sid, channel_sid, sid):
        """
        Initialize the MessageContext

        :param Version version: Version that contains the resource
        :param service_sid: The SID of the Service to fetch the resource from
        :param channel_sid: The SID of the Channel the message to fetch belongs to
        :param sid: The SID of the Message resource to fetch

        :returns: twilio.rest.chat.v2.service.channel.message.MessageContext
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageContext
        """
        super(MessageContext, self).__init__(version)

        # Path Solution
        self._solution = {
            'service_sid': service_sid,
            'channel_sid': channel_sid,
            'sid': sid,
        }
        self._uri = '/Services/{service_sid}/Channels/{channel_sid}/Messages/{sid}'.format(**self._solution)

    def _instance(self, payload):
        # Wrap a raw API payload in a MessageInstance bound to this context's path.
        return MessageInstance(
            self._version,
            payload,
            service_sid=self._solution['service_sid'],
            channel_sid=self._solution['channel_sid'],
            sid=self._solution['sid'],
        )

    def fetch(self):
        """
        Fetch the MessageInstance

        :returns: The fetched MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)
        return self._instance(payload)

    def delete(self, x_twilio_webhook_enabled=values.unset):
        """
        Deletes the MessageInstance

        :param MessageInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled})
        return self._version.delete(method='DELETE', uri=self._uri, headers=headers)

    def update(self, body=values.unset, attributes=values.unset,
               date_created=values.unset, date_updated=values.unset,
               last_updated_by=values.unset, from_=values.unset,
               x_twilio_webhook_enabled=values.unset):
        """
        Update the MessageInstance

        :param unicode body: The message to send to the channel
        :param unicode attributes: A valid JSON string that contains application-specific data
        :param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
        :param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
        :param unicode last_updated_by: The Identity of the User who last updated the Message, if applicable
        :param unicode from_: The Identity of the message's author
        :param MessageInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: The updated MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
        """
        data = values.of({
            'Body': body,
            'Attributes': attributes,
            'DateCreated': serialize.iso8601_datetime(date_created),
            'DateUpdated': serialize.iso8601_datetime(date_updated),
            'LastUpdatedBy': last_updated_by,
            'From': from_,
        })
        headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled})

        payload = self._version.update(method='POST', uri=self._uri, data=data, headers=headers)
        return self._instance(payload)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(key, value) for key, value in self._solution.items())
        return '<Twilio.Chat.V2.MessageContext {}>'.format(details)
class MessageInstance(InstanceResource):
    """A Message resource: one message posted to a Channel of a Chat Service."""

    class OrderType(object):
        # Accepted values for the `order` parameter of list()/stream()/page().
        ASC = "asc"
        DESC = "desc"

    class WebhookEnabledType(object):
        # Accepted values for the X-Twilio-Webhook-Enabled request header.
        TRUE = "true"
        FALSE = "false"

    def __init__(self, version, payload, service_sid, channel_sid, sid=None):
        """
        Initialize the MessageInstance

        :param Version version: Version that contains the resource
        :param dict payload: Raw message payload returned by the API
        :param service_sid: The SID of the Service the Message belongs to
        :param channel_sid: The SID of the Channel the Message belongs to
        :param sid: The Message SID; taken from the payload when omitted

        :returns: twilio.rest.chat.v2.service.channel.message.MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
        """
        super(MessageInstance, self).__init__(version)

        # Marshaled Properties
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'attributes': payload.get('attributes'),
            'service_sid': payload.get('service_sid'),
            'to': payload.get('to'),
            'channel_sid': payload.get('channel_sid'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'last_updated_by': payload.get('last_updated_by'),
            'was_edited': payload.get('was_edited'),
            # The API key is 'from'; stored as 'from_' because `from` is a
            # Python keyword.
            'from_': payload.get('from'),
            'body': payload.get('body'),
            'index': deserialize.integer(payload.get('index')),
            'type': payload.get('type'),
            'media': payload.get('media'),
            'url': payload.get('url'),
        }

        # Context
        self._context = None
        self._solution = {
            'service_sid': service_sid,
            'channel_sid': channel_sid,
            # Prefer an explicitly supplied sid; otherwise use the payload's.
            'sid': sid or self._properties['sid'],
        }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: MessageContext for this MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageContext
        """
        # Lazily built and cached on first access.
        if self._context is None:
            self._context = MessageContext(
                self._version,
                service_sid=self._solution['service_sid'],
                channel_sid=self._solution['channel_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def attributes(self):
        """
        :returns: The JSON string that stores application-specific data
        :rtype: unicode
        """
        return self._properties['attributes']

    @property
    def service_sid(self):
        """
        :returns: The SID of the Service that the resource is associated with
        :rtype: unicode
        """
        return self._properties['service_sid']

    @property
    def to(self):
        """
        :returns: The SID of the Channel that the message was sent to
        :rtype: unicode
        """
        return self._properties['to']

    @property
    def channel_sid(self):
        """
        :returns: The SID of the Channel the Message resource belongs to
        :rtype: unicode
        """
        return self._properties['channel_sid']

    @property
    def date_created(self):
        """
        :returns: The RFC 2822 date and time in GMT when the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: The RFC 2822 date and time in GMT when the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def last_updated_by(self):
        """
        :returns: The Identity of the User who last updated the Message
        :rtype: unicode
        """
        return self._properties['last_updated_by']

    @property
    def was_edited(self):
        """
        :returns: Whether the message has been edited since it was created
        :rtype: bool
        """
        return self._properties['was_edited']

    @property
    def from_(self):
        """
        :returns: The Identity of the message's author
        :rtype: unicode
        """
        return self._properties['from_']

    @property
    def body(self):
        """
        :returns: The content of the message
        :rtype: unicode
        """
        return self._properties['body']

    @property
    def index(self):
        """
        :returns: The index of the message within the Channel
        :rtype: unicode
        """
        return self._properties['index']

    @property
    def type(self):
        """
        :returns: The Message type
        :rtype: unicode
        """
        return self._properties['type']

    @property
    def media(self):
        """
        :returns: A Media object that describes the Message's media if attached; otherwise, null
        :rtype: dict
        """
        return self._properties['media']

    @property
    def url(self):
        """
        :returns: The absolute URL of the Message resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch the MessageInstance

        :returns: The fetched MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
        """
        return self._proxy.fetch()

    def delete(self, x_twilio_webhook_enabled=values.unset):
        """
        Deletes the MessageInstance

        :param MessageInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete(x_twilio_webhook_enabled=x_twilio_webhook_enabled, )

    def update(self, body=values.unset, attributes=values.unset,
               date_created=values.unset, date_updated=values.unset,
               last_updated_by=values.unset, from_=values.unset,
               x_twilio_webhook_enabled=values.unset):
        """
        Update the MessageInstance

        :param unicode body: The message to send to the channel
        :param unicode attributes: A valid JSON string that contains application-specific data
        :param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
        :param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
        :param unicode last_updated_by: The Identity of the User who last updated the Message, if applicable
        :param unicode from_: The Identity of the message's author
        :param MessageInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: The updated MessageInstance
        :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
        """
        return self._proxy.update(
            body=body,
            attributes=attributes,
            date_created=date_created,
            date_updated=date_updated,
            last_updated_by=last_updated_by,
            from_=from_,
            x_twilio_webhook_enabled=x_twilio_webhook_enabled,
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Chat.V2.MessageInstance {}>'.format(context)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SquaredDifferenceOpTest(test.TestCase):

  def _testGrad(self, left_shape, right_shape):
    """Checks analytic vs. numeric gradients of squared_difference."""
    # Broadcasting: the output takes the higher-rank shape.
    output_shape = (left_shape if len(left_shape) > len(right_shape)
                    else right_shape)
    left_vals = np.random.randn(*left_shape)
    right_vals = np.random.randn(*right_shape)
    with self.test_session(use_gpu=True):
      left_tensor = constant_op.constant(left_vals, shape=left_shape)
      right_tensor = constant_op.constant(right_vals, shape=right_shape)
      output = math_ops.squared_difference(left_tensor, right_tensor)
      left_err = gradient_checker.compute_gradient_error(
          left_tensor, left_shape, output, output_shape,
          x_init_value=left_vals)
      right_err = gradient_checker.compute_gradient_error(
          right_tensor, right_shape, output, output_shape,
          x_init_value=right_vals)
      self.assertLess(left_err, 1e-10)
      self.assertLess(right_err, 1e-10)

  def testGrad(self):
    # One side broadcast in each direction.
    self._testGrad([1, 2, 3, 2], [3, 2])
    self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):

  def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
    """Returns samples from a normal distribution shifted `bias` away from 0."""
    value = np.random.randn(*shape) * sigma
    return value + np.sign(value) * bias

  def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
    """Checks the analytic gradient of abs() against the numeric one.

    Args:
      shape: Shape of the random test values.
      dtype: TensorFlow dtype of the test values.
      max_error: Maximum tolerated gradient-check error.
      bias: Offset applied away from zero (abs is non-differentiable at 0).
      sigma: Standard deviation of the random samples.
    """
    np.random.seed(7)
    if dtype in (dtypes.complex64, dtypes.complex128):
      value = math_ops.complex(
          self._biasedRandN(
              shape, bias=bias, sigma=sigma),
          self._biasedRandN(
              shape, bias=bias, sigma=sigma))
    else:
      # Fix: sigma was previously dropped for real dtypes, so the near-pole
      # case in testComplexAbs (bias=0.0, sigma=0.1) actually sampled with
      # the default sigma=1.0.
      value = ops.convert_to_tensor(
          self._biasedRandN(
              shape, bias=bias, sigma=sigma), dtype=dtype)

    with self.test_session(use_gpu=True):
      output = math_ops.abs(value)
      error = gradient_checker.compute_gradient_error(
          value, shape, output, output.get_shape().as_list())
      self.assertLess(error, max_error)

  def testComplexAbs(self):
    # Bias random test values away from zero to avoid numeric instabilities.
    self._testGrad(
        [3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
    self._testGrad(
        [3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)

    # Ensure stability near the pole at zero.
    self._testGrad(
        [3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
    self._testGrad(
        [3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):

  def _checkReductionGrad(self, reduction):
    # Gradient of a min/max reduction over duplicated inputs is well-behaved.
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    outputs = reduction(array_ops.concat([inputs, inputs], 0))
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(inputs, [1], outputs, [])
      self.assertLess(error, 1e-4)

  def testMinGradient(self):
    self._checkReductionGrad(math_ops.reduce_min)

  def testMaxGradient(self):
    self._checkReductionGrad(math_ops.reduce_max)
class MaximumOrMinimumGradientTest(test.TestCase):

  def _checkElementwiseGrad(self, op, threshold):
    # Gradient of element-wise maximum/minimum against a scalar threshold.
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
    outputs = op(inputs, threshold)
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(inputs, [4], outputs, [4])
      self.assertLess(error, 1e-4)

  def testMaximumGradient(self):
    self._checkElementwiseGrad(math_ops.maximum, 3.0)

  def testMinimumGradient(self):
    self._checkElementwiseGrad(math_ops.minimum, 2.0)
class ProdGradientTest(test.TestCase):

  def _checkGrad(self, inputs, outputs):
    # Shared gradient check for reduce_prod variants.
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)

  def testProdGradient(self):
    inputs = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float32)
    self._checkGrad(inputs, math_ops.reduce_prod(inputs))

  def testProdGradientForNegativeAxis(self):
    inputs = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float32)
    self._checkGrad(inputs, math_ops.reduce_prod(inputs, -1))

  def testProdGradientComplex(self):
    for dtype in (dtypes.complex64, dtypes.complex128):
      inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]], dtype=dtype)
      self._checkGrad(inputs, math_ops.reduce_prod(inputs))

  def testProdGradientForNegativeAxisComplex(self):
    for dtype in (dtypes.complex64, dtypes.complex128):
      inputs = constant_op.constant([[1 + 3j, 2 - 1j], [3j, 4]], dtype=dtype)
      self._checkGrad(inputs, math_ops.reduce_prod(inputs, -1))
class SegmentMinOrMaxGradientTest(test.TestCase):

  def _checkSegmentGrad(self, segment_op):
    # Two segments over three distinct values.
    data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
    segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
    reduced = segment_op(data, segment_ids)
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(data, [3], reduced, [2])
      self.assertLess(error, 1e-4)

  def _checkTiedSegmentGrad(self, segment_op):
    # One segment whose two entries are identical (a tie).
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    data = array_ops.concat([inputs, inputs], 0)
    segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
    reduced = segment_op(data, segment_ids)
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(inputs, [1], reduced, [1])
      self.assertLess(error, 1e-4)

  def testSegmentMinGradient(self):
    self._checkSegmentGrad(math_ops.segment_min)

  def testSegmentMaxGradient(self):
    self._checkSegmentGrad(math_ops.segment_max)

  def testSegmentMinGradientWithTies(self):
    self._checkTiedSegmentGrad(math_ops.segment_min)

  def testSegmentMaxGradientWithTies(self):
    self._checkTiedSegmentGrad(math_ops.segment_max)
class FloorModGradientTest(test.TestCase):

  def testFloorModGradient(self):
    # Keep the input away from the discontinuity where x/y == floor(x/y).
    divisor = constant_op.constant([17.], dtype=dtypes.float32)
    dividend = constant_op.constant([131.], dtype=dtypes.float32)
    remainder = math_ops.floormod(dividend, divisor)
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(dividend, [1],
                                                      remainder, [1])
      self.assertLess(error, 1e-4)
class DivNoNanGradientTest(test.TestCase):

  def testBasicGradient(self):
    # Denominator 1 + |x| is strictly positive, so the gradient is smooth.
    numer = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
    ratio = math_ops.div_no_nan(numer, 1 + math_ops.abs(numer))
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(
          numer,
          numer.get_shape().as_list(), ratio,
          ratio.get_shape().as_list())
      self.assertLess(error, 1e-4)

  def testGradientWithDenominatorIsZero(self):
    # With a zero denominator, div_no_nan's gradients are defined to be zero.
    numer = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
    denom = array_ops.zeros_like(numer, dtype=dtypes.float32)
    ratio = math_ops.div_no_nan(numer, denom)
    with self.cached_session():
      grad_numer, grad_denom = gradients.gradients(ratio, [numer, denom])
      self.assertAllClose(grad_numer.eval(), np.zeros(numer.shape.as_list()))
      self.assertAllClose(grad_denom.eval(), np.zeros(denom.shape.as_list()))
# Run every test case in this module when executed directly.
if __name__ == "__main__":
  test.main()
| |
"""
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
try:
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils import datetime_safe
from django.utils.six.moves import StringIO
from django.utils.timezone import is_aware
def rfc2822_date(date):
    """
    Formats a datetime as an RFC 2822 date string,
    e.g. 'Tue, 01 Jan 2013 12:00:00 +0100'.
    """
    # We can't use strftime() because it produces locale-dependent results, so
    # we have to map english month and day names manually
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
    if is_aware(date):
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # Fix: extract the sign before divmod(). divmod(-330, 60) == (-6, 30),
        # which would render a -05:30 offset as the incorrect '-0630'.
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d%02d' % (sign, hour, minute)
    else:
        return time_str + '-0000'
def rfc3339_date(date):
    """
    Formats a datetime as an RFC 3339 timestamp,
    e.g. '2013-01-01T12:00:00+01:00' or '...Z' for naive datetimes.
    """
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    if is_aware(date):
        time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # Fix: extract the sign before divmod(). divmod(-330, 60) == (-6, 30),
        # which would render a -05:30 offset as the incorrect '-06:30'.
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d:%02d' % (sign, hour, minute)
    else:
        return date.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_tag_uri(url, date):
    """
    Creates a TagURI.

    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    parsed = urlparse(url)
    date_part = ''
    if date is not None:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    return 'tag:%s%s:%s/%s' % (parsed.hostname, date_part, parsed.path, parsed.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"

    def __init__(self, title, link, description, language=None, author_email=None,
                 author_name=None, author_link=None, subtitle=None, categories=None,
                 feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        def to_unicode(s):
            return force_unicode(s, strings_only=True)

        if categories:
            categories = [force_unicode(c) for c in categories]
        if ttl is not None:
            # ttl may arrive as an int; the XML writer wants text.
            ttl = force_unicode(ttl)
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        self.feed.update(kwargs)
        self.items = []

    def add_item(self, title, link, description, author_email=None,
                 author_name=None, author_link=None, pubdate=None, comments=None,
                 unique_id=None, enclosure=None, categories=(), item_copyright=None,
                 ttl=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate (a datetime.datetime object) and enclosure
        (an instance of the Enclosure class).
        """
        def to_unicode(s):
            return force_unicode(s, strings_only=True)

        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # ttl may arrive as an int; the XML writer wants text.
            ttl = force_unicode(ttl)
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'enclosure': enclosure,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        item.update(kwargs)
        self.items.append(item)

    def num_items(self):
        """Number of items currently in the feed."""
        return len(self.items)

    def root_attributes(self):
        """
        Extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}

    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass

    def item_attributes(self, item):
        """
        Extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}

    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass

    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a
        file-like object. Subclasses must override this.
        """
        raise NotImplementedError

    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        buf = StringIO()
        self.write(buf, encoding)
        return buf.getvalue()

    def latest_post_date(self):
        """
        Returns the latest item's pubdate; falls back to the current
        date/time when no item has one.
        """
        dates = [item['pubdate'] for item in self.items if item['pubdate'] is not None]
        if dates:
            return max(dates)
        return datetime.datetime.now()
class Enclosure(object):
    "Represents an RSS enclosure"

    def __init__(self, url, length, mime_type):
        "All args are expected to be Python Unicode objects"
        self.url = iri_to_uri(url)
        self.length = length
        self.mime_type = mime_type
class RssFeed(SyndicationFeed):
    mime_type = 'application/rss+xml; charset=utf-8'

    def write(self, outfile, encoding):
        """Serializes the feed as RSS XML to `outfile`."""
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")

    def rss_attributes(self):
        return {
            "version": self._version,
            "xmlns:atom": "http://www.w3.org/2005/Atom",
        }

    def write_items(self, handler):
        # One <item> element per feed entry.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")

    def add_root_elements(self, handler):
        feed = self.feed
        handler.addQuickElement("title", feed['title'])
        handler.addQuickElement("link", feed['link'])
        handler.addQuickElement("description", feed['description'])
        if feed['feed_url'] is not None:
            handler.addQuickElement(
                "atom:link", None, {"rel": "self", "href": feed['feed_url']})
        if feed['language'] is not None:
            handler.addQuickElement("language", feed['language'])
        for cat in feed['categories']:
            handler.addQuickElement("category", cat)
        if feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if feed['ttl'] is not None:
            handler.addQuickElement("ttl", feed['ttl'])

    def endChannelElement(self, handler):
        handler.endElement("channel")
class RssUserland091Feed(RssFeed):
    _version = "0.91"

    def add_item_elements(self, handler, item):
        # RSS 0.91 items carry only a title, a link and an optional description.
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])

        # Author information.
        if item["author_name"] and item["author_email"]:
            author = "%s (%s)" % (item['author_email'], item['author_name'])
            handler.addQuickElement("author", author)
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            handler.addQuickElement(
                "dc:creator", item["author_name"],
                {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})

        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            handler.addQuickElement("guid", item['unique_id'])
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])

        # Enclosure.
        if item['enclosure'] is not None:
            enclosure = item['enclosure']
            handler.addQuickElement("enclosure", '', {
                "url": enclosure.url,
                "length": enclosure.length,
                "type": enclosure.mime_type,
            })

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
    # Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
    mime_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"

    def write(self, outfile, encoding):
        """Serialize the whole feed as an Atom <feed> document to *outfile*."""
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")

    def root_attributes(self):
        """Attributes for the root <feed> element (namespace, optional language)."""
        attrs = {"xmlns": self.ns}
        if self.feed['language'] is not None:
            attrs["xml:lang"] = self.feed['language']
        return attrs

    def add_root_elements(self, handler):
        """Write the feed-level metadata elements."""
        add = handler.addQuickElement
        feed = self.feed
        add("title", feed['title'])
        add("link", "", {"rel": "alternate", "href": feed['link']})
        if feed['feed_url'] is not None:
            add("link", "", {"rel": "self", "href": feed['feed_url']})
        add("id", feed['id'])
        add("updated", rfc3339_date(self.latest_post_date()))
        if feed['author_name'] is not None:
            handler.startElement("author", {})
            add("name", feed['author_name'])
            if feed['author_email'] is not None:
                add("email", feed['author_email'])
            if feed['author_link'] is not None:
                add("uri", feed['author_link'])
            handler.endElement("author")
        if feed['subtitle'] is not None:
            add("subtitle", feed['subtitle'])
        for cat in feed['categories']:
            add("category", "", {"term": cat})
        if feed['feed_copyright'] is not None:
            add("rights", feed['feed_copyright'])

    def write_items(self, handler):
        """Write one <entry> element per feed item."""
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")

    def add_item_elements(self, handler, item):
        """Write the child elements of a single <entry>."""
        add = handler.addQuickElement
        add("title", item['title'])
        add("link", "", {"href": item['link'], "rel": "alternate"})
        if item['pubdate'] is not None:
            add("updated", rfc3339_date(item['pubdate']))

        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            add("name", item['author_name'])
            if item['author_email'] is not None:
                add("email", item['author_email'])
            if item['author_link'] is not None:
                add("uri", item['author_link'])
            handler.endElement("author")

        # Unique ID: fall back to a tag URI derived from link + pubdate.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        add("id", unique_id)

        # Summary.
        if item['description'] is not None:
            add("summary", item['description'], {"type": "html"})

        # Enclosure.
        if item['enclosure'] is not None:
            add("link", '',
                {"rel": "enclosure",
                 "href": item['enclosure'].url,
                 "length": item['enclosure'].length,
                 "type": item['enclosure'].mime_type})

        # Categories.
        for cat in item['categories']:
            add("category", "", {"term": cat})

        # Rights.
        if item['item_copyright'] is not None:
            add("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
# (A plain alias, not a subclass.)
DefaultFeed = Rss201rev2Feed
| |
#!/usr/bin/python
# vim: set fileencoding=utf-8
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.contrib.auth.decorators import permission_required
from django.shortcuts import render_to_response, get_object_or_404
from django.db import transaction
from django.conf import settings
from datetime import datetime
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from questionnaire import QuestionProcessors
from questionnaire import questionnaire_start, questionset_start, questionset_done, questionnaire_done
from questionnaire import AnswerException
from questionnaire import Processors
from questionnaire.models import *
from questionnaire.parsers import *
from questionnaire.emails import _send_email, send_emails
from questionnaire.utils import numal_sort, split_numal
from questionnaire.request_cache import request_cache
from questionnaire import profiler
from compat import commit_on_success
import logging
import random
from hashlib import md5
import re
# Session-based runcode/questionset tracking is opt-in via
# settings.QUESTIONNAIRE_USE_SESSION; when the setting is absent the
# runcode travels in the URL instead.
try:
    use_session = settings.QUESTIONNAIRE_USE_SESSION
except AttributeError:
    use_session = False
def r2r(tpl, request, **contextdict):
    """Render *tpl* with a RequestContext, exposing the request in the context."""
    context = dict(contextdict, request=request)
    return render_to_response(tpl, context, context_instance=RequestContext(request))
def get_runinfo(random):
    """Return the RunInfo whose random key matches (case-insensitively), or None."""
    matches = RunInfo.objects.filter(random=random.lower())
    if matches:
        return matches[0]
    return None
def get_question(number, questionnaire):
    """Return the Question with *number* inside *questionnaire*, or None."""
    matches = Question.objects.filter(number=number,
                                      questionset__questionnaire=questionnaire)
    if matches:
        return matches[0]
    return None
def delete_answer(question, subject, runid):
    """Remove any stored Answer for this question/subject/runid combination."""
    Answer.objects.filter(question=question, subject=subject,
                          runid=runid).delete()
def add_answer(runinfo, question, answer_dict):
    """
    Add an Answer to a Question for RunInfo, given the relevant form input

    answer_dict contains the POST'd elements for this question, minus the
    question_{number} prefix. The question_{number} form value is accessible
    with the ANSWER key.

    Returns True on success. Raises AnswerException when no processor is
    registered for the question's type.
    """
    answer = Answer()
    answer.question = question
    answer.subject = runinfo.subject
    answer.runid = runinfo.runid

    # Was named `type`, shadowing the builtin; renamed for clarity.
    question_type = question.get_type()

    # Guarantee the processor always sees an ANSWER key, even when the
    # browser submitted no value for this question.
    answer_dict.setdefault('ANSWER', None)

    if question_type not in Processors:
        raise AnswerException("No Processor defined for question type %s" % question_type)
    answer.answer = Processors[question_type](question, answer_dict) or ''

    # first, delete all existing answers to this question for this particular user+run
    delete_answer(question, runinfo.subject, runinfo.runid)
    # then save the new answer to the database
    answer.save(runinfo)

    return True
def check_parser(runinfo, exclude=None):
    """
    Build a check-evaluation function bound to *runinfo*.

    Returns a (request-cached) callable that takes a checks string and
    reports whether the runinfo satisfies every recognized check.
    *exclude* optionally names check types to disable (e.g. ["maleonly"]).
    """
    # The default used to be a shared mutable list ([]); use None to avoid
    # the mutable-default-argument pitfall.
    if exclude is None:
        exclude = []

    depparser = BooleanParser(dep_check, runinfo, {})
    tagparser = BooleanParser(has_tag, runinfo)

    fnmap = {
        "maleonly": lambda v: runinfo.subject.gender == 'male',
        "femaleonly": lambda v: runinfo.subject.gender == 'female',
        "shownif": lambda v: v and depparser.parse(v),
        "iftag": lambda v: v and tagparser.parse(v)
    }

    for ex in exclude:
        del fnmap[ex]

    @request_cache()
    def satisfies_checks(checks):
        # An empty checks string always passes.
        if not checks:
            return True

        checks = parse_checks(checks)

        for check, value in checks.items():
            if check in fnmap:
                value = value and value.strip()
                if not fnmap[check](value):
                    return False

        return True

    return satisfies_checks
@request_cache()
def skipped_questions(runinfo):
    """Question numbers the user skipped, parsed from the comma-separated field."""
    skipped = runinfo.skipped
    if not skipped:
        return []
    return [number.strip() for number in skipped.split(',')]
@request_cache()
def question_satisfies_checks(question, runinfo, checkfn=None):
    """True when the question is neither skipped nor filtered out by its checks."""
    if question.number in skipped_questions(runinfo):
        return False
    if not checkfn:
        checkfn = check_parser(runinfo)
    return checkfn(question.checks)
@request_cache(keyfn=lambda *args: args[0].id)
def questionset_satisfies_checks(questionset, runinfo, checks=None):
    """Return True if the runinfo passes the checks specified in the QuestionSet

    Checks is an optional dictionary with the keys being questionset.pk and the
    values being the checks of the contained questions.

    This, in conjunction with fetch_checks allows for fewer
    db roundtrips and greater performance.

    Sadly, checks cannot be hashed and therefore the request cache is useless
    here. Thankfully the benefits outweigh the costs in my tests.
    """
    passes = check_parser(runinfo)

    if not passes(questionset.checks):
        return False

    # When no pre-fetched checks were supplied, load this questionset's own.
    if not checks:
        checks = {
            questionset.id: [(q.checks, q.number)
                             for q in questionset.questions()]
        }

    question_checks = checks[questionset.id]

    # questionsets that pass the checks but have no questions are shown
    # (comments, last page, etc.)
    if not question_checks:
        return True

    # if there are questions at least one needs to be visible
    skipped = skipped_questions(runinfo)
    for check, number in question_checks:
        if number in skipped:
            continue
        if passes(check):
            return True

    return False
def get_progress(runinfo):
    """Percentage (int, 1..100) of visible questionsets up to the current one."""
    current = runinfo.questionset
    sets = current.questionnaire.questionsets()

    # fetch all the question checks at once -- this avoids a database
    # roundtrip per question inside questionset_satisfies_checks
    checks = fetch_checks(sets)

    position = total = 0
    for candidate in sets:
        if not questionset_satisfies_checks(candidate, runinfo, checks):
            continue
        total += 1
        if candidate.id == current.id:
            position = total

    if position and total:
        progress = float(position) / float(total) * 100.00
        # progress is always at least one percent
        if progress < 1.0:
            progress = 1
    else:
        progress = 1

    return int(progress)
def get_async_progress(request, *args, **kwargs):
    """ Returns the progress as json for use with ajax """
    # json is not imported at this file's top level (it presumably arrived
    # via a star import before); import it explicitly here.
    import json

    # `runcode` used to be left unbound when it was neither in kwargs nor in
    # the session, raising a confusing UnboundLocalError; default to None so
    # the failure (if any) happens at the lookup instead.
    runcode = kwargs.get('runcode')
    if runcode is None:
        runcode = request.session.get('runcode', None)

    runinfo = get_runinfo(runcode)
    response = dict(progress=get_progress(runinfo))
    # Keep the async cache in sync so subsequent polls are cheap.
    cache.set('progress' + runinfo.random, response['progress'])
    response = HttpResponse(json.dumps(response),
                            content_type='application/javascript')
    response["Cache-Control"] = "no-cache"
    return response
def fetch_checks(questionsets):
    """
    Bulk-load the (checks, number) pairs for every question in *questionsets*.

    Returns {questionset_pk: [(checks, number), ...]} with an entry (possibly
    empty) for every given questionset, so callers can index without guards.
    """
    ids = [qs.pk for qs in questionsets]

    query = Question.objects.filter(questionset__pk__in=ids)
    query = query.values('questionset_id', 'checks', 'number')

    # Pre-seed every id so questionsets without questions map to [].
    checks = dict((qsid, []) for qsid in ids)

    # The old code iterated a redundant generator wrapper around the
    # queryset -- iterate it directly.
    for result in query:
        checks[result['questionset_id']].append(
            (result['checks'], result['number'])
        )

    return checks
def redirect_to_qs(runinfo, request=None):
    "Redirect to the correct and current questionset URL for this RunInfo"

    # cache current questionset
    qs = runinfo.questionset

    # skip questionsets that don't pass; advance runinfo to the first
    # questionset whose checks succeed (or None if there is none)
    if not questionset_satisfies_checks(runinfo.questionset, runinfo):
        next = runinfo.questionset.next()

        while next and not questionset_satisfies_checks(next, runinfo):
            next = next.next()

        runinfo.questionset = next
        runinfo.save()

        hasquestionset = bool(next)
    else:
        hasquestionset = True

    # empty ?  (no questionset at all passes its checks)
    if not hasquestionset:
        logging.warn('no questionset in questionnaire which passes the check')
        return finish_questionnaire(request, runinfo, qs.questionnaire)

    if not use_session:
        args = [runinfo.random, runinfo.questionset.sortid]
        urlname = 'questionset'
    else:
        # NOTE(review): this branch dereferences request.session, so callers
        # must pass `request` whenever QUESTIONNAIRE_USE_SESSION is on --
        # confirm all call sites.
        args = []
        request.session['qs'] = runinfo.questionset.sortid
        request.session['runcode'] = runinfo.random
        urlname = 'questionnaire'

    url = reverse(urlname, args=args)
    return HttpResponseRedirect(url)
def redirect_to_prev_questionnaire(request):
    """
    Used only when ```QUESTIONNAIRE_USE_SESSION``` is True.

    Takes the questionnaire set in the session and redirects to the
    previous questionnaire if any; otherwise falls back to '/'.
    """
    runcode = request.session.get('runcode', None)
    if runcode is not None:
        runinfo = get_runinfo(runcode)
        # get_runinfo may return None; the old code dereferenced
        # runinfo.questionset *before* checking runinfo, raising
        # AttributeError for stale session runcodes.
        if runinfo:
            prev_qs = runinfo.questionset.prev()
            if prev_qs:
                request.session['runcode'] = runinfo.random
                request.session['qs'] = prev_qs.sortid
                return HttpResponseRedirect(reverse('questionnaire'))

    return HttpResponseRedirect('/')
@commit_on_success
def questionnaire(request, runcode=None, qs=None):
    """
    Process submitted answers (if present) and redirect to next page

    If this is a POST request, parse the submitted data in order to store
    all the submitted answers. Then return to the next questionset or
    return a completed response.

    If this isn't a POST request, redirect to the main page.

    We only commit on success, to maintain consistency. We also specifically
    rollback if there were errors processing the answers for this questionset.
    """
    # In session mode the runcode and questionset id live in the session,
    # overriding whatever came in via the URL.
    if use_session:
        session_runcode = request.session.get('runcode', None)
        if session_runcode is not None:
            runcode = session_runcode
        session_qs = request.session.get('qs', None)
        if session_qs is not None:
            qs = session_qs

    # if runcode provided as query string, redirect to the proper page
    if not runcode:
        runcode = request.GET.get('runcode')
        if not runcode:
            return HttpResponseRedirect("/")
        else:
            if not use_session:
                args = [runcode, ]
            else:
                request.session['runcode'] = runcode
                args = []
            return HttpResponseRedirect(reverse("questionnaire", args=args))

    runinfo = get_runinfo(runcode)

    if not runinfo:
        # Unknown runcode: release the transaction and bail out.
        transaction.commit()
        return HttpResponseRedirect('/')

    # let the runinfo have a piggy back ride on the request
    # so we can easily use the runinfo in places like the question processor
    # without passing it around
    request.runinfo = runinfo

    if not qs:
        # Only change the language to the subjects choice for the initial
        # questionnaire page (may be a direct link from an email)
        if hasattr(request, 'session'):
            request.session['django_language'] = runinfo.subject.language
            translation.activate(runinfo.subject.language)

    if 'lang' in request.GET:
        return set_language(request, runinfo, request.path)

    # --------------------------------
    # --- Handle non-POST requests ---
    # --------------------------------
    if request.method != "POST":
        if qs is not None:
            qs = get_object_or_404(QuestionSet, sortid=qs, questionnaire=runinfo.questionset.questionnaire)
            if runinfo.random.startswith('test:'):
                pass # ok for testing
            elif qs.sortid > runinfo.questionset.sortid:
                # you may jump back, but not forwards
                return redirect_to_qs(runinfo, request)
            runinfo.questionset = qs
            runinfo.save()
            transaction.commit()
        # no questionset id in URL, so redirect to the correct URL
        if qs is None:
            return redirect_to_qs(runinfo, request)
        questionset_start.send(sender=None, runinfo=runinfo, questionset=qs)
        return show_questionnaire(request, runinfo)

    # -------------------------------------
    # --- Process POST with QuestionSet ---
    # -------------------------------------

    # if the submitted page is different to what runinfo says, update runinfo
    # XXX - do we really want this?
    qs = request.POST.get('questionset_id', qs)
    try:
        qsobj = QuestionSet.objects.filter(pk=qs)[0]
        if qsobj.questionnaire == runinfo.questionset.questionnaire:
            if runinfo.questionset != qsobj:
                runinfo.questionset = qsobj
                runinfo.save()
    except:
        # NOTE(review): bare except silently ignores malformed/unknown
        # questionset_id values -- could be narrowed (IndexError/ValueError).
        pass

    questionnaire = runinfo.questionset.questionnaire
    questionset = runinfo.questionset

    # to confirm that we have the correct answers
    expected = questionset.questions()

    items = request.POST.items()
    extra = {} # question_object => { "ANSWER" : "123", ... }

    # this will ensure that each question will be processed, even if we did not receive
    # any fields for it. Also works to ensure the user doesn't add extra fields in
    for x in expected:
        items.append( (u'question_%s_Trigger953' % x.number, None) )

    # generate the answer_dict for each question, and place in extra
    for item in items:
        key, value = item[0], item[1]

        if key.startswith('question_'):
            # key looks like question_<number>[_<subfield>]
            answer = key.split("_", 2)
            question = get_question(answer[1], questionnaire)
            if not question:
                logging.warn("Unknown question when processing: %s" % answer[1])
                continue
            extra[question] = ans = extra.get(question, {})
            if(len(answer) == 2):
                ans['ANSWER'] = value
            elif(len(answer) == 3):
                ans[answer[2]] = value
            else:
                logging.warn("Poorly formed form element name: %r" % answer)
                continue
            extra[question] = ans

    errors = {}
    for question, ans in extra.items():
        if not question_satisfies_checks(question, runinfo):
            continue
        # The synthetic Trigger953 marker (added above for every expected
        # question) guards against client-injected extra questions.
        if u"Trigger953" not in ans:
            logging.warn("User attempted to insert extra question (or it's a bug)")
            continue
        try:
            cd = question.getcheckdict()
            # requiredif is the new way
            depon = cd.get('requiredif',None) or cd.get('dependent',None)
            if depon:
                depparser = BooleanParser(dep_check, runinfo, extra)
                if not depparser.parse(depon):
                    # if check is not the same as answer, then we don't care
                    # about this question plus we should delete it from the DB
                    delete_answer(question, runinfo.subject, runinfo.runid)
                    if cd.get('store', False):
                        runinfo.set_cookie(question.number, None)
                    continue
            add_answer(runinfo, question, ans)
            if cd.get('store', False):
                runinfo.set_cookie(question.number, ans['ANSWER'])
        except AnswerException, e:
            # Validation failure: remember it, render the page again below.
            errors[question.number] = e
        except Exception:
            logging.exception("Unexpected Exception")
            transaction.rollback()
            raise

    if len(errors) > 0:
        res = show_questionnaire(request, runinfo, errors=errors)
        # roll back any answers stored before the failing one
        transaction.rollback()
        return res

    questionset_done.send(sender=None,runinfo=runinfo,questionset=questionset)

    # advance to the next questionset whose checks pass (None == finished)
    next = questionset.next()
    while next and not questionset_satisfies_checks(next, runinfo):
        next = next.next()
    runinfo.questionset = next
    runinfo.save()
    if use_session:
        request.session['prev_runcode'] = runinfo.random

    if next is None: # we are finished
        return finish_questionnaire(request, runinfo, questionnaire)

    transaction.commit()
    return redirect_to_qs(runinfo, request)
def finish_questionnaire(request, runinfo, questionnaire):
    """Archive a completed run and redirect to the questionnaire's exit URL.

    Copies the RunInfo into RunInfoHistory, fires questionnaire_done,
    expands $LANG/$SUBJECTID/$RUNID placeholders in the configured redirect
    URL, and deletes the RunInfo (well-known test runs are recycled instead).
    """
    hist = RunInfoHistory()
    hist.subject = runinfo.subject
    hist.runid = runinfo.runid
    hist.completed = datetime.now()
    hist.questionnaire = questionnaire
    hist.tags = runinfo.tags
    hist.skipped = runinfo.skipped
    hist.save()

    questionnaire_done.send(sender=None, runinfo=runinfo,
                            questionnaire=questionnaire)

    # substitute placeholders in the configured redirect URL
    redirect_url = questionnaire.redirect_url
    for x,y in (('$LANG', translation.get_language()),
                ('$SUBJECTID', runinfo.subject.id),
                ('$RUNID', runinfo.runid),):
        redirect_url = redirect_url.replace(x, str(y))

    # well-known test runids are reset to the first questionset instead of
    # being deleted, so testers can run through the questionnaire repeatedly
    if runinfo.runid in ('12345', '54321') \
    or runinfo.runid.startswith('test:'):
        runinfo.questionset = QuestionSet.objects.filter(questionnaire=questionnaire).order_by('sortid')[0]
        runinfo.save()
    else:
        runinfo.delete()
    transaction.commit()
    if redirect_url:
        return HttpResponseRedirect(redirect_url)
    # NOTE(review): the literal '$LANG' in this template name is presumably
    # resolved by custom template-loading machinery elsewhere -- confirm.
    return r2r("questionnaire/complete.$LANG.html", request)
def show_questionnaire(request, runinfo, errors=None):
    """
    Return the QuestionSet template for runinfo's current questionset.

    Also collects the javascript/css dependencies and trigger hooks each
    question type needs, pre-fills form values from cookies and from a
    failed POST, and attaches progress information.

    errors -- optional {question_number: AnswerException} mapping from a
              failed submission; rendered alongside the questions.
    """
    # `errors={}` was a mutable default argument; normalize from None instead.
    errors = errors or {}

    request.runinfo = runinfo

    # Debugging aid: ?show_all=1 renders every question of the questionnaire
    # on one page instead of just the current questionset.
    # (The old code computed `questions` twice; the first copy was dead.)
    show_all = request.GET.get('show_all') == '1'
    questionset = runinfo.questionset
    questions = questionset.questionnaire.questions() if show_all else questionset.questions()

    qlist = []
    jsinclude = []      # js files to include
    cssinclude = []     # css files to include
    jstriggers = []
    qvalues = {}

    # initialize qvalues from the values stored in the runinfo cookies
    cookiedict = runinfo.get_cookiedict()
    for k, v in cookiedict.items():
        qvalues[k] = v

    substitute_answer(qvalues, runinfo.questionset)

    for question in questions:
        # if we got here the questionset will at least contain one question
        # which passes, so this is all we need to check for
        question_visible = question_satisfies_checks(question, runinfo) or show_all
        Type = question.get_type()
        _qnum, _qalpha = split_numal(question.number)

        qdict = {
            'css_style': '' if question_visible else 'display:none;',
            'template': 'questionnaire/%s.html' % (Type),
            'qnum': _qnum,
            'qalpha': _qalpha,
            'qtype': Type,
            'qnum_class': (_qnum % 2 == 0) and " qeven" or " qodd",
            'qalpha_class': _qalpha and (ord(_qalpha[-1]) % 2
                                         and ' alodd' or ' aleven') or '',
        }

        # substitute answer texts
        substitute_answer(qvalues, question)

        # add javascript dependency checks
        cd = question.getcheckdict()
        depon = cd.get('requiredif', None) or cd.get('dependent', None)
        if depon:
            # extra args to BooleanParser are not required for toString
            parser = BooleanParser(dep_check)
            qdict['checkstring'] = ' checks="%s"' % parser.toString(depon)
            jstriggers.append('qc_%s' % question.number)
        if 'default' in cd and not question.number in cookiedict:
            qvalues[question.number] = cd['default']
        if Type in QuestionProcessors:
            qdict.update(QuestionProcessors[Type](request, question))
            # Collect per-question-type assets, de-duplicated per file.
            # BUG FIXED: the old code (a) compared the whole list against the
            # collected elements (always "not in", so duplicates piled up)
            # and (b) extended cssinclude with qdict['jsinclude'].
            for jsfile in qdict.get('jsinclude', ()):
                if jsfile not in jsinclude:
                    jsinclude.append(jsfile)
            for cssfile in qdict.get('cssinclude', ()):
                if cssfile not in cssinclude:
                    cssinclude.append(cssfile)
            if 'jstriggers' in qdict:
                jstriggers.extend(qdict['jstriggers'])
            if 'qvalue' in qdict and not question.number in cookiedict:
                qvalues[question.number] = qdict['qvalue']

        qlist.append((question, qdict))

    # Progress bar: settings.QUESTIONNAIRE_PROGRESS may be 'async',
    # 'default', or anything else to disable it; absent means 'default'.
    try:
        has_progress = settings.QUESTIONNAIRE_PROGRESS in ('async', 'default')
        async_progress = settings.QUESTIONNAIRE_PROGRESS == 'async'
    except AttributeError:
        has_progress = True
        async_progress = False

    if has_progress:
        if async_progress:
            progress = cache.get('progress' + runinfo.random, 1)
        else:
            progress = get_progress(runinfo)
    else:
        progress = 0

    # Re-fill form values from the POST so a page redisplayed after
    # validation errors keeps the user's input.
    if request.POST:
        for k, v in request.POST.items():
            if k.startswith("question_"):
                s = k.split("_")
                if len(s) == 4:
                    qvalues[s[1] + '_' + v] = '1' # evaluates true in JS
                elif len(s) == 3 and s[2] == 'comment':
                    qvalues[s[1] + '_' + s[2]] = v
                else:
                    qvalues[s[1]] = v

    if use_session:
        prev_url = reverse('redirect_to_prev_questionnaire')
    else:
        prev_url = 'javascript:history.back();'

    r = r2r("questionnaire/questionset.html", request,
            questionset=runinfo.questionset,
            runinfo=runinfo,
            errors=errors,
            qlist=qlist,
            progress=progress,
            triggers=jstriggers,
            qvalues=qvalues,
            jsinclude=jsinclude,
            cssinclude=cssinclude,
            async_progress=async_progress,
            async_url=reverse('progress', args=[runinfo.random]),
            prev_url=prev_url,
            )
    r['Cache-Control'] = 'no-cache'
    r['Expires'] = "Thu, 24 Jan 1980 00:00:00 GMT"
    return r
def substitute_answer(qvalues, obj):
    """Objects with a 'text/text_xx' attribute can contain magic strings
    referring to the answers of other questions. This function takes
    any such object, goes through the stored answers (qvalues) and replaces
    the magic string with the actual value. If this isn't possible the
    magic string is removed from the text.

    Only answers with 'store' in their check will work with this.
    """
    if not (qvalues and obj.text):
        return

    magic = 'subst_with_ans_'
    references = re.findall(r'subst_with_ans_(\S+)', obj.text)
    text_attributes = [attr for attr in dir(obj) if attr.startswith('text_')]

    for answerid in references:
        token = magic + answerid
        # Unknown answer ids substitute as '' which strips the magic string.
        replacement = qvalues.get(answerid.lower(), '')
        for attr in text_attributes:
            updated = getattr(obj, attr).replace(token, replacement)
            setattr(obj, attr, updated)
def set_language(request, runinfo=None, next=None):
    """
    Change the language, save it to runinfo if provided, and
    redirect to the provided URL (or the last URL).
    Can also be used by a url handler, w/o runinfo & next.
    """
    if not next:
        # NOTE(review): request.REQUEST was removed in Django 1.9; this code
        # targets an older Django -- confirm before upgrading.
        next = request.REQUEST.get('next', None)
    if not next:
        # fall back to the referring page, then to the site root
        next = request.META.get('HTTP_REFERER', None)
        if not next:
            next = '/'

    response = HttpResponseRedirect(next)
    response['Expires'] = "Thu, 24 Jan 1980 00:00:00 GMT"
    if request.method == 'GET':
        lang_code = request.GET.get('lang', None)
        if lang_code and translation.check_for_language(lang_code):
            # store the choice in the session when available, else a cookie
            if hasattr(request, 'session'):
                request.session['django_language'] = lang_code
            else:
                response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
            # persist the choice on the subject as well
            if runinfo:
                runinfo.subject.language = lang_code
                runinfo.subject.save()
    return response
def _table_headers(questions):
    """
    Return the header labels for a set of questions as a list of strings.

    This will create separate columns for each multiple-choice possibility
    and freeform options, to avoid mixing data types and make charting easier.
    """
    ql = list(questions)
    # Python-2-only cmp-style sort; numal_sort orders mixed numeric/alpha
    # question numbers such as '10a'.
    ql.sort(lambda x, y: numal_sort(x.number, y.number))
    columns = []
    for q in ql:
        if q.type == 'choice-yesnocomment':
            columns.extend([q.number, q.number + "-freeform"])
        elif q.type == 'choice-freeform':
            columns.extend([q.number, q.number + "-freeform"])
        elif q.type.startswith('choice-multiple'):
            # one column per defined choice value, in numal order
            cl = [c.value for c in q.choice_set.all()]
            cl.sort(numal_sort)
            columns.extend([q.number + '-' + value for value in cl])
            if q.type == 'choice-multiple-freeform':
                columns.append(q.number + '-freeform')
        else:
            columns.append(q.number)
    return columns
@permission_required("questionnaire.export")
def export_csv(request, qid): # questionnaire_id
    """
    For a given questionnaire id, generate a CSV containing all the
    answers for all subjects.
    """
    # NOTE: Python 2 only (cStringIO and the unicode builtin below).
    import tempfile, csv, cStringIO, codecs
    from django.core.servers.basehttp import FileWrapper

    class UnicodeWriter:
        """
        COPIED from http://docs.python.org/library/csv.html example:

        A CSV writer which will write rows to CSV file "f",
        which is encoded in the given encoding.
        """
        def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
            # Redirect output to a queue
            self.queue = cStringIO.StringIO()
            self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
            self.stream = f
            self.encoder = codecs.getincrementalencoder(encoding)()

        def writerow(self, row):
            # csv can't handle unicode in py2: round-trip through UTF-8,
            # then re-encode into the requested target encoding.
            self.writer.writerow([unicode(s).encode("utf-8") for s in row])
            # Fetch UTF-8 output from the queue ...
            data = self.queue.getvalue()
            data = data.decode("utf-8")
            # ... and reencode it into the target encoding
            data = self.encoder.encode(data)
            # write to the target stream
            self.stream.write(data)
            # empty queue
            self.queue.truncate(0)

        def writerows(self, rows):
            for row in rows:
                self.writerow(row)

    fd = tempfile.TemporaryFile()

    questionnaire = get_object_or_404(Questionnaire, pk=int(qid))
    headings, answers = answer_export(questionnaire)

    writer = UnicodeWriter(fd)
    writer.writerow([u'subject', u'runid'] + headings)
    for subject, runid, answer_row in answers:
        # empty cells are exported as '--' so they are visible in the CSV
        row = ["%s/%s" % (subject.id, subject.state), runid] + [
            a if a else '--' for a in answer_row]
        writer.writerow(row)

    response = HttpResponse(FileWrapper(fd), content_type="text/csv")
    response['Content-Length'] = fd.tell()
    response['Content-Disposition'] = 'attachment; filename="export-%s.csv"' % qid
    fd.seek(0)
    return response
def answer_export(questionnaire, answers=None):
    """
    questionnaire -- questionnaire model for export
    answers -- query set of answers to include in export, defaults to all

    Return a flat dump of column headings and all the answers for a
    questionnaire (in query set answers) in the form (headings, answers)
    where headings is:
        ['question1 number', ...]
    and answers is:
        [(subject1, 'runid1', ['answer1.1', ...]), ... ]

    The headings list might include items with labels like
    'questionnumber-freeform'. Those columns will contain all the freeform
    answers for that question (separated from the other answer data).

    Multiple choice questions will have one column for each choice with
    labels like 'questionnumber-choice'.

    The items in the answers list are unicode strings or empty strings
    if no answer was given. The number of elements in each answer list will
    always match the number of headings.
    """
    if answers is None:
        answers = Answer.objects.all()
    answers = answers.filter(
        question__questionset__questionnaire=questionnaire).order_by(
        'subject', 'runid', 'question__questionset__sortid', 'question__number')
    answers = answers.select_related()
    questions = Question.objects.filter(
        questionset__questionnaire=questionnaire)
    headings = _table_headers(questions)

    coldict = {}
    for num, col in enumerate(headings): # use coldict to find column indexes
        coldict[col] = num
    # collect choices for each question
    qchoicedict = {}
    for q in questions:
        qchoicedict[q.id] = [x[0] for x in q.choice_set.values_list('value')]

    runid = subject = None
    out = []
    row = []
    for answer in answers:
        # start a fresh row whenever the (subject, runid) pair changes;
        # the ordering above guarantees each pair is contiguous
        if answer.runid != runid or answer.subject != subject:
            if row:
                out.append((subject, runid, row))
            runid = answer.runid
            subject = answer.subject
            row = [""] * len(headings)
        ans = answer.split_answer()
        # NOTE(review): an int answer is str()'d and then iterated below,
        # which walks it character by character -- confirm split_answer can
        # really return a bare int (multi-digit values would be split up).
        if type(ans) == int:
            ans = str(ans)
        for choice in ans:
            col = None
            if type(choice) == list:
                # freeform choice
                choice = choice[0]
                col = coldict.get(answer.question.number + '-freeform', None)
            if col is None: # look for enumerated choice column (multiple-choice)
                col = coldict.get(answer.question.number + '-' + unicode(choice), None)
            if col is None: # single-choice items
                if ((not qchoicedict[answer.question.id]) or
                        choice in qchoicedict[answer.question.id]):
                    col = coldict.get(answer.question.number, None)
            if col is None: # last ditch, if not found throw it in a freeform column
                col = coldict.get(answer.question.number + '-freeform', None)
            if col is not None:
                row[col] = choice
    # and don't forget about the last one
    if row:
        out.append((subject, runid, row))
    return headings, out
def answer_summary(questionnaire, answers=None):
    """
    questionnaire -- questionnaire model for summary
    answers -- query set of answers to include in summary, defaults to all

    Return a summary of the answer totals in answer_qs in the form:
    [('q1', 'question1 text',
        [('choice1', 'choice1 text', num), ...],
        ['freeform1', ...]), ...]

    questions are returned in questionnaire order
    choices are returned in question order
    freeform options are case-insensitive sorted
    """
    if answers is None:
        answers = Answer.objects.all()
    answers = answers.filter(question__questionset__questionnaire=questionnaire)
    questions = Question.objects.filter(
        questionset__questionnaire=questionnaire).order_by(
        'questionset__sortid', 'number')

    summary = []
    for question in questions:
        q_type = question.get_type()
        # build the closed set of recognized choices for this question type
        if q_type.startswith('choice-yesno'):
            choices = [('yes', _('Yes')), ('no', _('No'))]
            if 'dontknow' in q_type:
                choices.append(('dontknow', _("Don't Know")))
        elif q_type.startswith('choice'):
            choices = [(c.value, c.text) for c in question.choices()]
        else:
            choices = []
        choice_totals = dict([(k, 0) for k, v in choices])
        freeforms = []
        for a in answers.filter(question=question):
            ans = a.split_answer()
            for choice in ans:
                if type(choice) == list:
                    freeforms.extend(choice)
                elif choice in choice_totals:
                    choice_totals[choice] += 1
                else:
                    # be tolerant of improperly marked data
                    freeforms.append(choice)
        # Python-2-only cmp-style sort
        freeforms.sort(numal_sort)
        summary.append((question.number, question.text, [
            (n, t, choice_totals[n]) for (n, t) in choices], freeforms))
    return summary
def has_tag(tag, runinfo):
    """ Returns true if the given runinfo contains the given tag. """
    stripped = [t.strip() for t in runinfo.tags.split(',')]
    return tag in stripped
def dep_check(expr, runinfo, answerdict):
    """
    Given a comma separated question number and expression, determine if the
    provided answer to the question number satisfies the expression.

    If the expression starts with >, >=, <, or <=, compare the rest of
    the expression numerically and return False if it's not able to be
    converted to an integer.

    If the expression starts with !, return true if the rest of the expression
    does not match the answer.

    Otherwise return true if the expression matches the answer.

    If there is no comma and only a question number, it checks if the answer
    is "yes"

    When looking up the answer, it first checks if it's in the answerdict,
    then it checks runinfo's cookies, then it does a database lookup to find
    the answer.

    The use of the comma separator is purely historical.
    """
    # runinfo may be a RunInfo (has questionset) or a RunInfoHistory
    # (has questionnaire directly)
    if hasattr(runinfo, 'questionset'):
        questionnaire = runinfo.questionset.questionnaire
    elif hasattr(runinfo, 'questionnaire'):
        questionnaire = runinfo.questionnaire
    else:
        assert False

    # bare question number implies ",yes"
    if "," not in expr:
        expr = expr + ",yes"

    check_questionnum, check_answer = expr.split(",",1)
    try:
        check_question = Question.objects.get(number=check_questionnum,
            questionset__questionnaire = questionnaire)
    except Question.DoesNotExist:
        return False

    # 1st lookup source: the in-flight answers from the current POST
    if check_question in answerdict:
        # test for membership in multiple choice questions
        # FIXME: only checking answerdict
        for k, v in answerdict[check_question].items():
            if not k.startswith('multiple_'):
                continue
            if check_answer.startswith("!"):
                if check_answer[1:].strip() == v.strip():
                    return False
            elif check_answer.strip() == v.strip():
                return True
        actual_answer = answerdict[check_question].get('ANSWER', '')
    # 2nd lookup source: the runinfo cookie (questions marked 'store')
    elif hasattr(runinfo, 'get_cookie') and runinfo.get_cookie(check_questionnum, False):
        actual_answer = runinfo.get_cookie(check_questionnum)
    else:
        # retrieve from database
        ansobj = Answer.objects.filter(question=check_question,
            runid=runinfo.runid, subject=runinfo.subject)
        if ansobj:
            actual_answer = ansobj[0].split_answer()[0]
            logging.warn("Put `store` in checks field for question %s" \
                % check_questionnum)
        else:
            actual_answer = None

    # fall back to the question's declared default, then to the empty string
    if not actual_answer:
        if check_question.getcheckdict():
            actual_answer = check_question.getcheckdict().get('default')

    if actual_answer is None:
        actual_answer = u''

    # numeric comparisons: <, <=, >, >=
    if check_answer[0:1] in "<>":
        try:
            actual_answer = float(actual_answer)
            if check_answer[1:2] == "=":
                check_value = float(check_answer[2:])
            else:
                check_value = float(check_answer[1:])
        except:
            # NOTE(review): bare except -- could be narrowed to
            # (ValueError, TypeError).
            logging.error("ERROR: must use numeric values with < <= => > checks (%r)" % check_question)
            return False
        if check_answer.startswith("<="):
            return actual_answer <= check_value
        if check_answer.startswith(">="):
            return actual_answer >= check_value
        if check_answer.startswith("<"):
            return actual_answer < check_value
        if check_answer.startswith(">"):
            return actual_answer > check_value

    # negation: "!value" matches any non-empty answer different from value
    if check_answer.startswith("!"):
        if actual_answer == '':
            return False
        return check_answer[1:].strip() != actual_answer.strip()

    return check_answer.strip() == actual_answer.strip()
@permission_required("questionnaire.management")
def send_email(request, runinfo_id):
    """Send the questionnaire email for one run and report the outcome.

    POST-only management view: looks up the RunInfo, delegates the
    actual delivery to ``_send_email``, and renders a result page.
    """
    if request.method != "POST":
        return HttpResponse("This page MUST be called as a POST request.")
    run = get_object_or_404(RunInfo, pk=int(runinfo_id))
    sent_ok = _send_email(run)
    return r2r("emailsent.html", request, runinfo=run, successful=sent_ok)
def generate_run(request, questionnaire_id, subject_id=None):
    """
    A view that can generate a RunID instance anonymously,
    and then redirect to the questionnaire itself.

    It uses a Subject with the givenname of 'Anonymous' and the
    surname of 'User'. If this Subject does not exist, it will
    be created.

    This can be used with a URL pattern like:
    (r'^take/(?P<questionnaire_id>[0-9]+)/$', 'questionnaire.views.generate_run'),
    """
    questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)
    first_questionset = questionnaire.questionsets()[0]

    if subject_id is not None:
        subject = get_object_or_404(Subject, pk=subject_id)
    else:
        # Reuse the shared anonymous subject, creating it on first use.
        anonymous = Subject.objects.filter(givenname='Anonymous',
                                           surname='User')[0:1]
        if anonymous:
            subject = anonymous[0]
        else:
            subject = Subject(givenname='Anonymous', surname='User')
            subject.save()

    # 16 random bytes salted with SECRET_KEY, hashed into the run key.
    raw_material = "".join(chr(random.randint(0, 255)) for _ in range(16))
    raw_material += settings.SECRET_KEY
    key = md5(raw_material).hexdigest()

    run = RunInfo(subject=subject, random=key, runid=key,
                  questionset=first_questionset)
    run.save()

    if not use_session:
        kwargs = {'runcode': key}
    else:
        kwargs = {}
        request.session['runcode'] = key

    questionnaire_start.send(sender=None, runinfo=run,
                             questionnaire=questionnaire)
    return HttpResponseRedirect(reverse('questionnaire', kwargs=kwargs))
| |
# -*- coding:utf-8 -*-
"""
WPTools Query module
~~~~~~~~~~~~~~~~~~~~
Support for forming WMF API query strings.
* Mediawiki: https://www.mediawiki.org/wiki/API:Main_page
* Wikidata: https://www.wikidata.org/wiki/Wikidata:Data_access
* RESTBase: https://www.mediawiki.org/wiki/RESTBase
See also:
* WMF: https://wikimediafoundation.org/wiki/Our_projects
"""
try: # python2
from urllib import quote, unquote
except ImportError: # python3
from urllib.parse import quote, unquote
from string import Template
import random
class WPToolsQuery(object):
    """
    WPToolsQuery class

    Forms WMF API query strings for MediaWiki, Wikidata and RESTBase
    endpoints, and keeps a short human-readable ``status`` string
    describing the most recently formed query.
    """

    DEFAULT_ENDPOINT = '/w/api.php'

    # Display width cap for the status string, and the padding reserved
    # on the right when the status has to be truncated.
    MAXWIDTH = 72
    RPAD = 4

    IMAGEINFO = Template((
        "${WIKI}${ENDPOINT}?action=query"
        "&format=json"
        "&formatversion=2"
        "&iiprop=size|url|timestamp|extmetadata"
        "&prop=imageinfo"
        "&titles=${FILES}"))

    LIST = Template((
        "${WIKI}${ENDPOINT}?action=query"
        "&format=json"
        "&formatversion=2"
        "&list=${LIST}"))

    PARSE = Template((
        "${WIKI}${ENDPOINT}?action=parse"
        "&format=json"
        "&formatversion=2"
        "&contentmodel=text"
        "&disableeditsection="
        "&disablelimitreport="
        "&disabletoc="
        "&prop=text|iwlinks|parsetree|wikitext|displaytitle|properties"
        "&redirects"
        "&page=${PAGE}"))

    QUERY = Template((
        "${WIKI}${ENDPOINT}?action=query"
        "&exintro"
        "&format=json"
        "&formatversion=2"
        "&inprop=url|watchers"
        "&list=random"
        "&pithumbsize=240"
        "&pllimit=500"
        "&ppprop=disambiguation|wikibase_item"
        "&prop=extracts|info|links|pageassessments|pageimages|pageprops"
        "|pageterms|redirects"
        "&redirects"
        "&rdlimit=500"
        "&rnlimit=1"
        "&rnnamespace=0"
        "&titles=${TITLES}"))

    QUERYMORE = Template((
        "${WIKI}${ENDPOINT}?action=query"
        "&bllimit=500"
        "&bltitle=${TITLES}"
        "&cllimit=500"
        "&clshow=!hidden"
        "&format=json"
        "&formatversion=2"
        "&imlimit=500"
        "&list=backlinks"
        "&lllimit=500"
        "&pclimit=500"
        "&prop=categories|contributors|images|langlinks|pageviews"
        "&redirects"
        "&titles=${TITLES}"))

    WIKIDATA = Template((
        "${WIKI}${ENDPOINT}?action=wbgetentities"
        "&format=json"
        "&formatversion=2"
        "&languages=${LANG}"
        "&props=${PROPS}"
        "&redirects=yes"))

    endpoint = None
    lang = None
    status = None
    variant = None
    wiki = None

    def __init__(self, lang='en', variant=None, wiki=None, endpoint=None):
        """
        Returns a WPToolsQuery object

        Arguments:
        - [lang=en]: <str> Mediawiki language code
        - [variant=None]: <str> language variant
        - [wiki=None]: <str> alternative wiki site
        - [endpoint=None]: <str> alternative API endpoint
        """
        self.lang = lang
        self.variant = variant

        self.wiki = wiki or "%s.wikipedia.org" % self.lang
        self.domain = domain_name(self.wiki)
        self.endpoint = endpoint or self.DEFAULT_ENDPOINT
        self.uri = self.wiki_uri(self.wiki)

    def category(self, title, pageid=None, cparams=None, namespace=None):
        """
        Returns category query string

        Arguments:
        - title: <str> category title (ignored when pageid is given)
        - [pageid=None]: <int> category page ID
        - [cparams=None]: <str> extra query parameters to append
        - [namespace=None]: <int> restrict members to this namespace
        """
        query = self.LIST.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            LIST='categorymembers')

        # Coerce to str: pageid is an int, and appending the cparams
        # suffix below used to raise TypeError (int += str).
        status = str(pageid or title)

        query += "&cmlimit=500"

        if namespace is not None:
            query += "&cmnamespace=%d" % namespace

        # pageid wins over title when both are supplied
        if title and pageid:
            title = None

        if title:
            query += "&cmtitle=" + safequote(title)

        if pageid:
            query += "&cmpageid=%d" % pageid

        if cparams:
            query += cparams
            status += ' (%s)' % cparams

        self.set_status('categorymembers', status)

        return query

    def labels(self, qids):
        """
        Returns Wikidata labels query string

        Arguments:
        - qids: <list> up to 50 Wikidata entity IDs (e.g. 'Q42')

        Raises ValueError when more than 50 IDs are given (API limit).
        """
        if len(qids) > 50:
            raise ValueError("The limit is 50.")

        # labels always come from wikidata.org, whatever wiki was set
        self.domain = 'www.wikidata.org'
        self.uri = self.wiki_uri(self.domain)

        query = self.WIKIDATA.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            LANG=self.variant or self.lang,
            PROPS='labels')

        qids = '|'.join(qids)
        query += "&ids=%s" % qids

        self.set_status('labels', qids)

        return query

    def imageinfo(self, files):
        """
        Returns imageinfo query string

        Arguments:
        - files: <list> image file titles (e.g. 'File:...')
        """
        files = '|'.join([safequote(x) for x in files])

        self.set_status('imageinfo', files)

        return self.IMAGEINFO.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            FILES=files)

    def parse(self, title, pageid=None):
        """
        Returns Mediawiki action=parse query string

        Arguments:
        - title: <str> page title
        - [pageid=None]: <int> page ID (used when title is empty;
          note the API drops redirect resolution for pageid queries)
        """
        qry = self.PARSE.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            PAGE=safequote(title) or pageid)

        if pageid and not title:
            qry = qry.replace('&page=', '&pageid=').replace('&redirects', '')

        if self.variant:
            qry += '&variant=' + self.variant

        self.set_status('parse', pageid or title)

        return qry

    def query(self, titles, pageids=None, cparams=None):
        """
        Returns MediaWiki action=query query string

        Arguments:
        - titles: <str> page title(s)
        - [pageids=None]: <int> page ID (used when titles is empty)
        - [cparams=None]: <str> extra query parameters to append
        """
        query = self.QUERY.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            TITLES=safequote(titles) or pageids)

        # "%s" coercion keeps status a str even for an int pageids,
        # matching querymore() and avoiding TypeError on += below.
        status = "%s" % (titles or pageids)

        if pageids and not titles:
            query = query.replace('&titles=', '&pageids=')

        if cparams:
            query += cparams
            status += " (%s)" % cparams

        if self.variant:
            query += '&variant=' + self.variant

        self.set_status('query', status)

        return query

    def querymore(self, titles, pageids=None, cparams=None):
        """
        Returns MediaWiki action=query query string (for MORE)

        A much more expensive query for popular pages

        Arguments:
        - titles: <str> page title(s)
        - [pageids=None]: <int> page ID (used when titles is empty)
        - [cparams=None]: <str> extra query parameters to append
        """
        query = self.QUERYMORE.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            TITLES=safequote(titles) or pageids)

        status = "%s" % (pageids or titles)

        if pageids and not titles:
            query = query.replace('&titles=', '&pageids=')

        if cparams:
            query += cparams
            status += " (%s)" % cparams

        if self.variant:
            query += '&variant=' + self.variant

        self.set_status('querymore', status)

        return query

    def random(self, namespace=0):
        """
        Returns query string for random page

        Arguments:
        - [namespace=0]: <int> namespace to draw the random page from
        """
        query = self.LIST.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            LIST='random')
        query += "&rnlimit=1&rnnamespace=%d" % namespace

        # cosmetic: the status target is a random food emoji
        emoji = [
            u'\U0001f32f',  # burrito or wrap
            u'\U0001f355',  # slice of pizza
            u'\U0001f35c',  # steaming bowl of ramen
            u'\U0001f363',  # sushi
            u'\U0001f369',  # doughnut
            u'\U0001f36a',  # cookie
            u'\U0001f36d',  # lollipop
            u'\U0001f370',  # strawberry shortcake
        ]

        action = 'random'
        if namespace:
            action = 'random:%d' % namespace

        self.set_status(action, random.choice(emoji))

        return query

    def restbase(self, endpoint, title):
        """
        Returns RESTBase query string

        Arguments:
        - endpoint: <str> RESTBase route (e.g. '/page/summary/')
        - title: <str> page title appended to the route (except '/page/')
        """
        if not endpoint:
            raise ValueError("invalid endpoint: %s" % endpoint)

        route = endpoint
        if title and endpoint != '/page/':
            route = endpoint + safequote_restbase(title)

        self.set_status('restbase', route)

        # route[1:] drops the leading slash already present in the path
        return "%s/api/rest_v1/%s" % (self.uri, route[1:])

    def set_status(self, action, target):
        """
        Sets query status with format: "{domain} ({action}) {target}"

        The status is single-line and truncated to MAXWIDTH with a
        trailing ellipsis, leaving RPAD columns of slack.
        """
        try:
            target = unquote(target)
        except (AttributeError, TypeError):
            # non-str targets (e.g. ints) are formatted as-is below
            pass

        status = "%s (%s) %s" % (self.domain, action, target)
        status = status.strip().replace('\n', '')

        if len(status) >= self.MAXWIDTH:
            tail = '...'
            extent = self.MAXWIDTH - (len(tail) + self.RPAD)
            self.status = status[:extent] + tail
        else:
            self.status = status

    def site(self, action):
        """
        Returns site query

        Arguments:
        - action: <str> one of 'siteinfo', 'sitematrix', 'sitevisitors'

        Raises ValueError for any other action.
        """
        query = None
        viewdays = 7
        hostpath = self.uri + self.endpoint

        if action == 'siteinfo':
            query = hostpath + (
                '?action=query'
                '&meta=siteinfo|siteviews'
                '&siprop=general|statistics'
                '&list=mostviewed&pvimlimit=max')
            query += '&pvisdays=%d' % viewdays  # meta=siteviews
            self.set_status('query', 'siteinfo|siteviews|mostviewed')
        elif action == 'sitematrix':
            query = hostpath + '?action=sitematrix'
            self.set_status('sitematrix', 'all')
        elif action == 'sitevisitors':
            query = hostpath + (
                '?action=query'
                '&meta=siteviews&pvismetric=uniques')
            query += '&pvisdays=%d' % viewdays  # meta=siteviews
            self.set_status('query', 'siteviews:uniques')

        if not query:
            raise ValueError("Could not form query")

        query += '&format=json&formatversion=2'

        return query

    def wiki_uri(self, wiki):
        """
        Returns scheme://domain from wiki name

        A wiki argument already carrying a scheme is returned as-is;
        otherwise https:// is prepended to self.domain (so set
        self.domain before calling).
        """
        if wiki.startswith('http'):
            return wiki
        return "https://" + self.domain

    def wikidata(self, title, wikibase=None):
        """
        Returns Wikidata query string

        Arguments:
        - title: <str> site page title (used when wikibase is empty)
        - [wikibase=None]: <str> Wikidata entity ID (e.g. 'Q42')
        """
        # entity data always comes from wikidata.org
        self.domain = 'www.wikidata.org'
        self.uri = self.wiki_uri(self.domain)

        query = self.WIKIDATA.substitute(
            WIKI=self.uri,
            ENDPOINT=self.endpoint,
            LANG=self.variant or self.lang,
            PROPS="aliases|info|claims|descriptions|labels|sitelinks")

        if wikibase:
            query += "&ids=%s" % wikibase
        elif title:
            title = safequote(title)
            query += "&sites=%swiki" % self.lang
            query += "&titles=%s" % title

        self.set_status('wikidata', wikibase or title)

        return query
def domain_name(wiki):
    """
    Returns domain name from wiki name

    Strips an optional scheme ('//'-prefixed) and any path component.
    """
    _, scheme_sep, remainder = wiki.partition('//')
    host = remainder if scheme_sep else wiki
    return host.partition('/')[0]
def safequote(string):
    """
    Try to UTF-8 encode and percent-quote string

    None is passed through unchanged; if UTF-8 encoding fails (Python 2
    byte strings), the raw value is quoted instead.
    """
    if string is None:
        return
    try:
        encoded = string.encode('utf-8')
    except UnicodeDecodeError:
        return quote(string)
    return quote(encoded)
def safequote_restbase(title):
    """
    Safequote restbase title possibly having slash in title

    Unlike safequote(), slashes are percent-encoded too (safe='') so
    they cannot be mistaken for RESTBase route separators.
    """
    try:
        encoded = title.encode('utf-8')
    except UnicodeDecodeError:
        return quote(title, safe='')
    return quote(encoded, safe='')
| |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
# Copyright (C) 2022 RERO.
# Copyright (C) 2022 Graz University of Technology.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test OAI verbs."""
import uuid
from copy import deepcopy
from datetime import datetime, timedelta
from helpers import create_record, run_after_insert_oai_set
from invenio_db import db
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.minters import recid_minter
from invenio_search import current_search
from lxml import etree
from invenio_oaiserver import current_oaiserver
from invenio_oaiserver.minters import oaiid_minter
from invenio_oaiserver.models import OAISet
from invenio_oaiserver.proxies import current_oaiserver
from invenio_oaiserver.response import NS_DC, NS_OAIDC, NS_OAIPMH
from invenio_oaiserver.utils import datetime_to_datestamp, \
eprints_description, friends_description, oai_identifier_description
# XPath prefix map used throughout these tests:
# x = OAI-PMH envelope, y = oai_dc container, z = Dublin Core elements.
NAMESPACES = {'x': NS_OAIPMH, 'y': NS_OAIDC, 'z': NS_DC}
def _xpath_errors(body):
    """Return all namespaced ``error`` elements found in *body*."""
    matches = body.iter('{*}error')
    return list(matches)
def test_no_verb(app):
    """A request without a verb must yield a schema-validation error."""
    with app.test_client() as client:
        body = client.get('/oai2d').data
        first_error = _xpath_errors(etree.fromstring(body))[0]
        assert 'Missing data for required field.' in first_error.text
def test_wrong_verb(app):
    """An unknown verb must be rejected with an explanatory error."""
    with app.test_client() as client:
        body = client.get('/oai2d?verb=Aaa').data
        first_error = _xpath_errors(etree.fromstring(body))[0]
        assert 'This is not a valid OAI-PMH verb:Aaa' in first_error.text
def test_identify(app):
    """Test Identify verb."""
    # baseUrls for friends element
    baseUrls = ['http://example.org/1',
                'http://example.org/2']
    # parameters for eprints element
    content = {'URL': 'http://arXiv.org/arXiv_content.htm'}
    # NOTE: adjacent string literals concatenate without a space, so the
    # stored policy text reads "...commercialand non-commercial..." — the
    # assertions below compare against these exact same values, so the
    # test is self-consistent either way.
    metadataPolicy = {'text': 'Metadata can be used by commercial'
                      'and non-commercial service providers',
                      'URL': 'http://arXiv.org/arXiv_metadata_use.htm'}
    dataPolicy = {'text': 'Full content, i.e. preprints may'
                  'not be harvested by robots'}
    submissionPolicy = {'URL': 'http://arXiv.org/arXiv_submission.htm'}
    # parameters for oai-identifier element
    scheme = 'oai'
    repositoryIdentifier = 'oai-stuff.foo.org'
    delimiter = ':'
    sampleIdentifier = 'oai:oai-stuff.foo.org:5324'
    # Register the three <description> blocks the assertions expect,
    # in this order: friends, eprints, oai-identifier.
    app.config['OAISERVER_DESCRIPTIONS'] = \
        [friends_description(baseUrls),
         eprints_description(metadataPolicy, dataPolicy,
                             submissionPolicy, content),
         oai_identifier_description(scheme, repositoryIdentifier, delimiter,
                                    sampleIdentifier)]

    with app.test_client() as c:
        result = c.get('/oai2d?verb=Identify')
        assert 200 == result.status_code
        tree = etree.fromstring(result.data)

        # --- envelope and fixed Identify fields ---
        assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
        assert len(tree.xpath('/x:OAI-PMH/x:Identify',
                              namespaces=NAMESPACES)) == 1
        repository_name = tree.xpath('/x:OAI-PMH/x:Identify/x:repositoryName',
                                     namespaces=NAMESPACES)
        assert len(repository_name) == 1
        assert repository_name[0].text == 'Invenio-OAIServer'
        base_url = tree.xpath('/x:OAI-PMH/x:Identify/x:baseURL',
                              namespaces=NAMESPACES)
        assert len(base_url) == 1
        assert base_url[0].text == 'http://app/oai2d'
        protocolVersion = tree.xpath(
            '/x:OAI-PMH/x:Identify/x:protocolVersion',
            namespaces=NAMESPACES)
        assert len(protocolVersion) == 1
        assert protocolVersion[0].text == '2.0'
        adminEmail = tree.xpath('/x:OAI-PMH/x:Identify/x:adminEmail',
                                namespaces=NAMESPACES)
        assert len(adminEmail) == 1
        assert adminEmail[0].text == 'info@inveniosoftware.org'
        earliestDatestamp = tree.xpath(
            '/x:OAI-PMH/x:Identify/x:earliestDatestamp',
            namespaces=NAMESPACES)
        assert len(earliestDatestamp) == 1
        deletedRecord = tree.xpath('/x:OAI-PMH/x:Identify/x:deletedRecord',
                                   namespaces=NAMESPACES)
        assert len(deletedRecord) == 1
        assert deletedRecord[0].text == 'no'
        granularity = tree.xpath('/x:OAI-PMH/x:Identify/x:granularity',
                                 namespaces=NAMESPACES)
        assert len(granularity) == 1

        description = tree.xpath('/x:OAI-PMH/x:Identify/x:description',
                                 namespaces=NAMESPACES)

        # --- friends description: every child is a configured baseURL ---
        friends_element = description[0]
        for element in friends_element.getchildren():
            for child in element.getchildren():
                assert child.tag == \
                    '{http://www.openarchives.org/OAI/2.0/friends/}baseURL'
                assert child.text in baseUrls

        # --- eprints description:
        #     content / metadataPolicy / dataPolicy / submissionPolicy ---
        eprints_root = description[1]
        children = eprints_root[0].getchildren()
        assert children[0].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}content'
        leaves = children[0].getchildren()
        assert len(leaves) == 1
        assert leaves[0].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}URL'
        assert leaves[0].text == content['URL']

        assert children[1].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}metadataPolicy'
        leaves = children[1].getchildren()
        assert len(leaves) == 2
        metadataPolicyContents = \
            ['{http://www.openarchives.org/OAI/2.0/eprints}text',
             '{http://www.openarchives.org/OAI/2.0/eprints}URL']
        # child order is not guaranteed, so compare tag/text as sets
        assert set([leaves[0].tag, leaves[1].tag]) == \
            set(metadataPolicyContents)
        assert set([leaves[0].text, leaves[1].text]) == \
            set(metadataPolicy.values())

        assert children[2].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}dataPolicy'
        leaves = children[2].getchildren()
        assert len(leaves) == 1
        assert leaves[0].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}text'
        assert leaves[0].text == dataPolicy['text']

        assert children[3].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}submissionPolicy'
        leaves = children[3].getchildren()
        assert len(leaves) == 1
        assert leaves[0].tag == \
            '{http://www.openarchives.org/OAI/2.0/eprints}URL'
        assert leaves[0].text == submissionPolicy['URL']

        # --- oai-identifier description: four verbatim leaves ---
        oai_identifier_root = description[2]
        children = oai_identifier_root[0].getchildren()
        assert children[0].tag == \
            '{http://www.openarchives.org/OAI/2.0/oai-identifier}scheme'
        assert children[0].text == scheme
        assert children[1].tag == \
            '{http://www.openarchives.org/OAI/2.0/oai-identifier}' + \
            'repositoryIdentifier'
        assert children[1].text == repositoryIdentifier
        assert children[2].tag == \
            '{http://www.openarchives.org/OAI/2.0/oai-identifier}' + \
            'delimiter'
        assert children[2].text == delimiter
        assert children[3].tag == \
            '{http://www.openarchives.org/OAI/2.0/oai-identifier}' + \
            'sampleIdentifier'
        assert children[3].text == sampleIdentifier
def test_identify_earliest_date(app, schema):
    """Test that Identify reports the earliest record creation date."""
    # With no records at all, the sentinel minimum datestamp is returned.
    with app.test_client() as c:
        result = c.get('/oai2d?verb=Identify')
        assert 200 == result.status_code
        tree = etree.fromstring(result.data)

        earliestDatestamp = tree.xpath(
            '/x:OAI-PMH/x:Identify/x:earliestDatestamp',
            namespaces=NAMESPACES)
        assert earliestDatestamp[0].text == '0001-01-01T00:00:00Z'

    # Create three records; backdate the first one's creation time so it
    # is unambiguously the earliest, and reindex it with that date.
    first_record = create_record(app, {
        '_oai': {'sets': ['a']}, 'title_statement': {'title': 'Test0'},
        '_oai_id': 1, '$schema': schema
    })
    first_record.model.created = datetime(2000, 1, 1, 13, 0, 0)
    RecordIndexer().index(first_record)

    create_record(app, {
        '_oai': {'sets': ['a']}, 'title_statement': {'title': 'Test1'},
        '_oai_id': 2, '$schema': schema
    })
    create_record(app, {
        '_oai': {'sets': ['a']}, 'title_statement': {'title': 'Test2'},
        '_oai_id': 3, '$schema': schema
    })

    app.extensions['invenio-search'].flush_and_refresh('records')

    # Identify must now report the backdated creation time.
    with app.test_client() as c:
        result = c.get('/oai2d?verb=Identify')
        assert 200 == result.status_code
        tree = etree.fromstring(result.data)

        earliestDatestamp = tree.xpath(
            '/x:OAI-PMH/x:Identify/x:earliestDatestamp',
            namespaces=NAMESPACES)
        assert earliestDatestamp[0].text == '2000-01-01T13:00:00Z'
def test_getrecord(app):
    """Test get record verb."""
    with app.test_request_context():
        # Create one record whose OAI id is minted from the '_oai' field.
        pid_value = 'oai:legacy:1'
        with db.session.begin_nested():
            record_id = uuid.uuid4()
            data = {
                '_oai': {'id': pid_value},
                'title_statement': {'title': 'Test0'},
            }
            pid = oaiid_minter(record_id, data)
            record = current_oaiserver.record_cls.create(data, id_=record_id)

        db.session.commit()
        assert pid_value == pid.pid_value
        # capture before the request — GetRecord must echo this timestamp
        record_updated = record.updated
        with app.test_client() as c:
            result = c.get(
                '/oai2d?verb=GetRecord&identifier={0}&metadataPrefix=oai_dc'
                .format(pid_value))
            assert 200 == result.status_code

            tree = etree.fromstring(result.data)

            # envelope, header, identifier, datestamp and metadata present
            assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
            assert len(tree.xpath('/x:OAI-PMH/x:GetRecord',
                                  namespaces=NAMESPACES)) == 1
            assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:record/x:header',
                                  namespaces=NAMESPACES)) == 1
            assert len(tree.xpath(
                '/x:OAI-PMH/x:GetRecord/x:record/x:header/x:identifier',
                namespaces=NAMESPACES)) == 1
            identifier = tree.xpath(
                '/x:OAI-PMH/x:GetRecord/x:record/x:header/x:identifier/text()',
                namespaces=NAMESPACES)
            assert identifier == [pid_value]
            datestamp = tree.xpath(
                '/x:OAI-PMH/x:GetRecord/x:record/x:header/x:datestamp/text()',
                namespaces=NAMESPACES)
            assert datestamp == [datetime_to_datestamp(record_updated)]
            assert len(tree.xpath('/x:OAI-PMH/x:GetRecord/x:record/x:metadata',
                                  namespaces=NAMESPACES)) == 1
def test_getrecord_fail(app):
    """GetRecord on a missing identifier must report idDoesNotExist."""
    url = ('/oai2d?verb=GetRecord&identifier={0}'
           '&metadataPrefix=oai_dc').format('not-exist-pid')
    with app.test_request_context():
        with app.test_client() as client:
            response = client.get(url)
            assert 422 == response.status_code
            _check_xml_error(etree.fromstring(response.data),
                             code='idDoesNotExist')
def _check_xml_error(tree, code):
    """Assert *tree* holds exactly one OAI-PMH error with the given *code*."""
    assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
    errors = tree.xpath('/x:OAI-PMH/x:error', namespaces=NAMESPACES)
    assert len(errors) == 1
    assert errors[0].attrib['code'] == code
def test_identify_with_additional_args(app):
    """Test identify with additional arguments."""
    with app.test_client() as c:
        # The '&not' in this URL was previously mangled into the mojibake
        # character '¬' (an HTML-entity round-trip), so no extra query
        # argument was actually being sent; the parser must see a real
        # unexpected argument to produce the "too many arguments" error.
        result = c.get('/oai2d?verb=Identify&notAValidArg=True')
        tree = etree.fromstring(result.data)
        assert 'You have passed too many arguments.' == _xpath_errors(
            tree)[0].text
def test_listmetadataformats(app):
    """Test ListMetadataFormats."""
    verb_query = '/oai2d?verb=ListMetadataFormats'
    _listmetadataformats(app=app, query=verb_query)
def test_listmetadataformats_record(app):
    """Test ListMetadataFormats for a record."""
    with app.test_request_context():
        with db.session.begin_nested():
            rec_uuid = uuid.uuid4()
            data = {'title_statement': {'title': 'Test0'}}
            recid_minter(rec_uuid, data)
            oai_pid = oaiid_minter(rec_uuid, data)
            current_oaiserver.record_cls.create(data, id_=rec_uuid)
            pid_value = oai_pid.pid_value
        db.session.commit()

    record_query = '/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
        pid_value)
    _listmetadataformats(app=app, query=record_query)
def test_listmetadataformats_record_fail(app):
    """ListMetadataFormats for a missing record must report idDoesNotExist."""
    bad_query = '/oai2d?verb=ListMetadataFormats&identifier={0}'.format(
        'pid-not-exixts')
    with app.test_request_context():
        with app.test_client() as client:
            response = client.get(bad_query)
            _check_xml_error(etree.fromstring(response.data),
                             code='idDoesNotExist')
def _listmetadataformats(app, query):
    """Try ListMetadataFormats."""
    with app.test_request_context():
        with app.test_client() as c:
            result = c.get(query)
            tree = etree.fromstring(result.data)
            assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
            assert len(tree.xpath('/x:OAI-PMH/x:ListMetadataFormats',
                                  namespaces=NAMESPACES)) == 1
            # One <metadataFormat> per configured format.
            metadataFormats = tree.xpath(
                '/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat',
                namespaces=NAMESPACES)
            cfg_metadataFormats = deepcopy(
                app.config.get('OAISERVER_METADATA_FORMATS', {}))
            assert len(metadataFormats) == len(cfg_metadataFormats)
            # Every advertised prefix must be a configured one.
            prefixes = tree.xpath(
                '/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
                'x:metadataPrefix', namespaces=NAMESPACES)
            assert len(prefixes) == len(cfg_metadataFormats)
            assert all(pfx.text in cfg_metadataFormats for pfx in prefixes)
            # Schema URLs pair up with their prefixes positionally.
            schemas = tree.xpath(
                '/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
                'x:schema', namespaces=NAMESPACES)
            assert len(schemas) == len(cfg_metadataFormats)
            assert all(sch.text in cfg_metadataFormats[pfx.text]['schema']
                       for sch, pfx in zip(schemas, prefixes))
            # So do the metadata namespaces.
            metadataNamespaces = tree.xpath(
                '/x:OAI-PMH/x:ListMetadataFormats/x:metadataFormat/'
                'x:metadataNamespace', namespaces=NAMESPACES)
            assert len(metadataNamespaces) == len(cfg_metadataFormats)
            assert all(nsp.text in cfg_metadataFormats[pfx.text]['namespace']
                       for nsp, pfx in zip(metadataNamespaces, prefixes))
def test_listsets(app):
    """Test ListSets."""
    with app.test_request_context():
        # Detach OAISet signal handlers so creating the set does not
        # trigger record (re)indexing in this minimal fixture.
        current_oaiserver.unregister_signals_oaiset()
        with db.session.begin_nested():
            a = OAISet(spec='test', name='Test', description='test desc')
            db.session.add(a)

        with app.test_client() as c:
            result = c.get('/oai2d?verb=ListSets')

        tree = etree.fromstring(result.data)

        # Exactly one set, with spec, name, and a Dublin Core description.
        assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1

        assert len(tree.xpath('/x:OAI-PMH/x:ListSets',
                              namespaces=NAMESPACES)) == 1
        assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set',
                              namespaces=NAMESPACES)) == 1
        assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setSpec',
                              namespaces=NAMESPACES)) == 1
        assert len(tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setName',
                              namespaces=NAMESPACES)) == 1
        assert len(tree.xpath(
            '/x:OAI-PMH/x:ListSets/x:set/x:setDescription',
            namespaces=NAMESPACES
        )) == 1
        assert len(
            tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc',
                       namespaces=NAMESPACES)
        ) == 1
        assert len(
            tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
                       'z:description', namespaces=NAMESPACES)
        ) == 1
        text = tree.xpath(
            '/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/'
            'z:description/text()', namespaces=NAMESPACES)
        assert len(text) == 1
        assert text[0] == 'test desc'
def test_listsets_invalid_name(app):
    """Test ListSets with invalid unicode character for XML."""
    invalid = u'uni\x01co\x0bde'
    with app.test_request_context():
        current_oaiserver.unregister_signals_oaiset()
        with db.session.begin_nested():
            db.session.add(OAISet(spec='test', name=invalid,
                                  description=invalid))

        with app.test_client() as client:
            response = client.get('/oai2d?verb=ListSets')

        tree = etree.fromstring(response.data)
        # Control characters illegal in XML must be stripped on output.
        name_node = tree.xpath('/x:OAI-PMH/x:ListSets/x:set/x:setName',
                               namespaces=NAMESPACES)[0]
        assert name_node.text == 'unicode'
        desc_node = tree.xpath(
            '/x:OAI-PMH/x:ListSets/x:set/x:setDescription/y:dc/z:description',
            namespaces=NAMESPACES)[0]
        assert desc_node.text == 'unicode'
def test_fail_missing_metadataPrefix(app):
    """Test ListRecords fail missing metadataPrefix."""
    for query in ('/oai2d?verb=ListRecords',
                  '/oai2d?verb=GetRecord&identifier=123',
                  '/oai2d?verb=ListIdentifiers'):
        with app.test_request_context():
            with app.test_client() as client:
                response = client.get(query)
                _check_xml_error(etree.fromstring(response.data),
                                 code='badArgument')
def test_fail_not_exist_metadataPrefix(app):
    """Test ListRecords fail not exist metadataPrefix."""
    for query in ('/oai2d?verb=ListRecords&metadataPrefix=not-exist',
                  '/oai2d?verb=GetRecord&identifier=123'
                  '&metadataPrefix=not-exist',
                  '/oai2d?verb=ListIdentifiers&metadataPrefix=not-exist'):
        with app.test_request_context():
            with app.test_client() as client:
                response = client.get(query)
                _check_xml_error(etree.fromstring(response.data),
                                 code='badArgument')
def test_listrecords_fail_missing_metadataPrefix(app):
    """Test ListRecords fail missing metadataPrefix."""
    with app.test_request_context():
        with app.test_client() as client:
            response = client.get('/oai2d?verb=ListRecords&')
            _check_xml_error(etree.fromstring(response.data),
                             code='badArgument')
def _listrecords_assert_page(tree, count):
    """Assert one ListRecords response page carries *count* full records."""
    assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
    assert len(tree.xpath('/x:OAI-PMH/x:ListRecords',
                          namespaces=NAMESPACES)) == 1
    record_base = '/x:OAI-PMH/x:ListRecords/x:record'
    # each record must have a header with identifier/datestamp + metadata
    for suffix in ('', '/x:header', '/x:header/x:identifier',
                   '/x:header/x:datestamp', '/x:metadata'):
        assert len(tree.xpath(record_base + suffix,
                              namespaces=NAMESPACES)) == count


def _listrecords_resumption_token(tree):
    """Return the resumptionToken element of a ListRecords page."""
    return tree.xpath('/x:OAI-PMH/x:ListRecords/x:resumptionToken',
                      namespaces=NAMESPACES)[0]


def test_listrecords(app):
    """Test ListRecords: pagination via resumption tokens and from/until."""
    total = 32
    record_ids = []

    with app.test_request_context():
        indexer = RecordIndexer()

        # Create and commit `total` records, then index them all.
        with db.session.begin_nested():
            for idx in range(total):
                record_id = uuid.uuid4()
                data = {'title_statement': {'title': 'Test{0}'.format(idx)}}
                recid_minter(record_id, data)
                oaiid_minter(record_id, data)
                record = current_oaiserver.record_cls.create(
                    data, id_=record_id)
                record_ids.append(record_id)
        db.session.commit()

        for record_id in record_ids:
            indexer.index_by_id(record_id)
        current_search.flush_and_refresh('_all')

    # First page: 10 records and a non-empty resumption token.
    with app.test_client() as c:
        result = c.get('/oai2d?verb=ListRecords&metadataPrefix=oai_dc')

    tree = etree.fromstring(result.data)
    _listrecords_assert_page(tree, 10)
    resumption_token = _listrecords_resumption_token(tree)
    assert resumption_token.text

    # Follow the tokens through the remaining pages (10 + 10 + 2 records);
    # only the final page carries an empty resumption token.
    for expected, expect_more in ((10, True), (10, True), (2, False)):
        with app.test_client() as c:
            result = c.get(
                '/oai2d?verb=ListRecords&resumptionToken={0}'.format(
                    resumption_token.text
                )
            )
        tree = etree.fromstring(result.data)
        _listrecords_assert_page(tree, expected)
        resumption_token = _listrecords_resumption_token(tree)
        if expect_more:
            assert resumption_token.text
        else:
            assert not resumption_token.text

    # Check from:until range
    with app.test_client() as c:
        # Check date and datetime timestamps.
        for granularity in (False, True):
            result = c.get(
                '/oai2d?verb=ListRecords&metadataPrefix=oai_dc'
                '&from={0}&until={1}'.format(
                    datetime_to_datestamp(
                        record.updated - timedelta(days=1),
                        day_granularity=granularity),
                    datetime_to_datestamp(
                        record.updated + timedelta(days=1),
                        day_granularity=granularity),
                )
            )
            assert result.status_code == 200
            tree = etree.fromstring(result.data)
            assert len(tree.xpath('/x:OAI-PMH/x:ListRecords/x:record',
                                  namespaces=NAMESPACES)) == 10

            # Check from:until range in resumption token
            resumption_token = _listrecords_resumption_token(tree)
            assert resumption_token.text
            with app.test_client() as c:
                result = c.get(
                    '/oai2d?verb=ListRecords&resumptionToken={0}'.format(
                        resumption_token.text
                    )
                )
            assert result.status_code == 200
def test_listidentifiers(app):
    """Test verb ListIdentifiers.

    Creates one OAI set and one record inside it, then checks the
    ListIdentifiers response for: the plain request, a from/until window,
    the ``set`` parameter, and from/until combined with ``set``.
    """
    from invenio_oaiserver.models import OAISet

    with app.app_context():
        current_oaiserver.unregister_signals_oaiset()
        # create new OAI Set
        with db.session.begin_nested():
            oaiset = OAISet(
                spec='test0',
                name='Test0',
                description='test desc 0',
                search_pattern='title_statement.title:Test0',
            )
            db.session.add(oaiset)
        db.session.commit()

    run_after_insert_oai_set()

    with app.test_request_context():
        indexer = RecordIndexer()

        # create a new record (inside the OAI Set)
        with db.session.begin_nested():
            record_id = uuid.uuid4()
            data = {'title_statement': {'title': 'Test0'}}
            recid_minter(record_id, data)
            pid = oaiid_minter(record_id, data)
            record = current_oaiserver.record_cls.create(data, id_=record_id)
        db.session.commit()

        indexer.index_by_id(record_id)
        current_search.flush_and_refresh('_all')

        pid_value = pid.pid_value

        # get the list of identifiers
        with app.test_client() as c:
            result = c.get(
                '/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc'
            )
            tree = etree.fromstring(result.data)

            assert len(tree.xpath('/x:OAI-PMH', namespaces=NAMESPACES)) == 1
            assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers',
                                  namespaces=NAMESPACES)) == 1
            assert len(tree.xpath('/x:OAI-PMH/x:ListIdentifiers/x:header',
                                  namespaces=NAMESPACES)) == 1
            identifier = tree.xpath(
                '/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
                namespaces=NAMESPACES
            )
            assert len(identifier) == 1
            assert identifier[0].text == str(pid_value)
            datestamp = tree.xpath(
                '/x:OAI-PMH/x:ListIdentifiers/x:header/x:datestamp',
                namespaces=NAMESPACES
            )
            assert len(datestamp) == 1
            assert datestamp[0].text == datetime_to_datestamp(record.updated)

        # Check from:until range
        with app.test_client() as c:
            # Check date and datetime timestamps.
            for granularity in (False, True):
                result = c.get(
                    '/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc'
                    '&from={0}&until={1}'.format(
                        datetime_to_datestamp(
                            record.updated - timedelta(1),
                            day_granularity=granularity),
                        datetime_to_datestamp(
                            record.updated + timedelta(1),
                            day_granularity=granularity),
                    )
                )
                assert result.status_code == 200
                tree = etree.fromstring(result.data)
                identifier = tree.xpath(
                    '/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
                    namespaces=NAMESPACES
                )
                assert len(identifier) == 1

        # check set param
        with app.test_client() as c:
            # BUG FIX: the original looped over granularities and called
            # ``'&set=test0'.format(<datestamps>)`` -- a template with no
            # placeholders -- so the arguments were silently discarded and
            # the same URL was requested twice.  One plain request is
            # behaviorally equivalent.
            result = c.get(
                '/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc&set=test0'
            )
            assert result.status_code == 200
            tree = etree.fromstring(result.data)
            identifier = tree.xpath(
                '/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
                namespaces=NAMESPACES
            )
            assert len(identifier) == 1

        # check from:until range and set param
        with app.test_client() as c:
            for granularity in (False, True):
                result = c.get(
                    '/oai2d?verb=ListIdentifiers&metadataPrefix=oai_dc'
                    '&from={0}&until={1}&set=test0'.format(
                        datetime_to_datestamp(
                            record.updated - timedelta(1),
                            day_granularity=granularity),
                        datetime_to_datestamp(
                            record.updated + timedelta(1),
                            day_granularity=granularity),
                    )
                )
                assert result.status_code == 200
                tree = etree.fromstring(result.data)
                identifier = tree.xpath(
                    '/x:OAI-PMH/x:ListIdentifiers/x:header/x:identifier',
                    namespaces=NAMESPACES
                )
                assert len(identifier) == 1
def test_list_sets_long(app):
    """Verify ListSets paginates 27 sets into pages of 10, 10 and 7."""
    from invenio_db import db
    from invenio_oaiserver.models import OAISet

    with app.app_context():
        current_oaiserver.unregister_signals_oaiset()
        # Register 27 sets so that three ListSets pages are required.
        with db.session.begin_nested():
            for index in range(27):
                db.session.add(OAISet(
                    spec='test{0}'.format(index),
                    name='Test{0}'.format(index),
                    description='test desc {0}'.format(index),
                    search_pattern='title_statement.title:Test{0}'.format(
                        index),
                ))
        db.session.commit()

    run_after_insert_oai_set()

    with app.test_client() as c:
        def fetch_page(token=None):
            # Request one ListSets page, optionally via a resumption token,
            # and return (set elements, resumptionToken element).
            if token is None:
                response = c.get('/oai2d?verb=ListSets')
            else:
                response = c.get(
                    '/oai2d?verb=ListSets&resumptionToken={0}'.format(token))
            xml = etree.fromstring(response.data)
            sets = xml.xpath('/x:OAI-PMH/x:ListSets/x:set',
                             namespaces=NAMESPACES)
            next_token = xml.xpath(
                '/x:OAI-PMH/x:ListSets/x:resumptionToken',
                namespaces=NAMESPACES
            )[0]
            return sets, next_token

        # First page:
        sets, token = fetch_page()
        assert len(sets) == 10
        assert token.text

        # Second page:
        sets, token = fetch_page(token.text)
        assert len(sets) == 10
        assert token.text

        # Third (last) page: remainder only, empty resumption token.
        sets, token = fetch_page(token.text)
        assert len(sets) == 7
        assert not token.text
def test_list_sets_with_resumption_token_and_other_args(app):
"""Test list sets with resumption tokens."""
pass
| |
from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen
from tornado.escape import json_decode, utf8, to_unicode, recursive_unicode, native_str, to_basestring
from tornado.httputil import format_timestamp
from tornado.iostream import IOStream
from tornado.log import app_log, gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type, ObjectDict, unicode_type
from tornado.web import RequestHandler, authenticated, Application, asynchronous, url, HTTPError, StaticFileHandler, _create_signature, create_signed_value, ErrorHandler, UIModule, MissingArgumentError
import binascii
import datetime
import email.utils
import logging
import os
import re
import socket
import sys
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
# Registry of test cases that are also safe to run in WSGI mode; the
# wsgi_safe decorator appends to it and wsgi_test.py re-runs the entries.
wsgi_safe_tests = []


def relpath(*a):
    """Return a path joined relative to this test module's directory."""
    # A def instead of an assigned lambda (PEP 8 E731): same behavior,
    # but with a proper name in tracebacks and repr.
    return os.path.join(os.path.dirname(__file__), *a)
def wsgi_safe(cls):
    # Class decorator: register *cls* in wsgi_safe_tests so it also runs
    # under the WSGI test suite; returns the class unchanged.
    wsgi_safe_tests.append(cls)
    return cls
class WebTestCase(AsyncHTTPTestCase):
    """Base class for web tests that also supports WSGI mode.

    Override get_handlers and get_app_kwargs instead of get_app.
    Append to wsgi_safe to have it run in wsgi_test as well.
    """
    def get_app(self):
        # Build the Application from the subclass-supplied handlers and
        # settings; kept on self.app so tests can call reverse_url etc.
        self.app = Application(self.get_handlers(), **self.get_app_kwargs())
        return self.app

    def get_handlers(self):
        # Subclasses must return their URL spec list.
        raise NotImplementedError()

    def get_app_kwargs(self):
        # Extra Application settings; none by default.
        return {}
class SimpleHandlerTestCase(WebTestCase):
    """Simplified base class for tests that work with a single handler class.

    To use, define a nested class named ``Handler``.
    """
    def get_handlers(self):
        # Route the root path to the subclass's nested Handler.
        return [('/', self.Handler)]
class CookieTestRequestHandler(RequestHandler):
    # stub out enough methods to make the secure_cookie functions work
    def __init__(self):
        # don't call super.__init__
        # Minimal state: an in-memory cookie jar plus the application
        # settings dict that the secure-cookie helpers read.
        self._cookies = {}
        self.application = ObjectDict(settings=dict(cookie_secret='0123456789'))

    def get_cookie(self, name):
        # Look up a previously stored cookie (no HTTP involved).
        return self._cookies.get(name)

    def set_cookie(self, name, value, expires_days=None):
        # Store in memory; expires_days is accepted but ignored here.
        self._cookies[name] = value
class SecureCookieTest(unittest.TestCase):
    """Unit tests for the signed (secure) cookie helpers; no HTTP server."""

    def test_round_trip(self):
        # A secure cookie decodes back to the exact bytes that were stored.
        handler = CookieTestRequestHandler()
        handler.set_secure_cookie('foo', b'bar')
        self.assertEqual(handler.get_secure_cookie('foo'), b'bar')

    def test_cookie_tampering_future_timestamp(self):
        handler = CookieTestRequestHandler()
        # this string base64-encodes to '12345678'
        handler.set_secure_cookie('foo', binascii.a2b_hex(b'd76df8e7aefc'))
        cookie = handler._cookies['foo']
        # Signed cookie format: payload|timestamp|signature.
        match = re.match(br'12345678\|([0-9]+)\|([0-9a-f]+)', cookie)
        self.assertTrue(match)
        timestamp = match.group(1)
        sig = match.group(2)
        # Sanity check: we can reproduce the signature ourselves.
        self.assertEqual(
            _create_signature(handler.application.settings["cookie_secret"],
                              'foo', '12345678', timestamp),
            sig)
        # shifting digits from payload to timestamp doesn't alter signature
        # (this is not desirable behavior, just confirming that that's how it
        # works)
        self.assertEqual(
            _create_signature(handler.application.settings["cookie_secret"],
                              'foo', '1234', b'5678' + timestamp),
            sig)
        # tamper with the cookie
        handler._cookies['foo'] = utf8('1234|5678%s|%s' % (
            to_basestring(timestamp), to_basestring(sig)))
        # it gets rejected
        with ExpectLog(gen_log, "Cookie timestamp in future"):
            self.assertTrue(handler.get_secure_cookie('foo') is None)

    def test_arbitrary_bytes(self):
        # Secure cookies accept arbitrary data (which is base64 encoded).
        # Note that normal cookies accept only a subset of ascii.
        handler = CookieTestRequestHandler()
        handler.set_secure_cookie('foo', b'\xe9')
        self.assertEqual(handler.get_secure_cookie('foo'), b'\xe9')
class CookieTest(WebTestCase):
    """End-to-end tests of regular (unsigned) cookie set/get behavior."""

    def get_handlers(self):
        class SetCookieHandler(RequestHandler):
            def get(self):
                # Try setting cookies with different argument types
                # to ensure that everything gets encoded correctly
                self.set_cookie("str", "asdf")
                self.set_cookie("unicode", u("qwer"))
                self.set_cookie("bytes", b"zxcv")

        class GetCookieHandler(RequestHandler):
            def get(self):
                self.write(self.get_cookie("foo", "default"))

        class SetCookieDomainHandler(RequestHandler):
            def get(self):
                # unicode domain and path arguments shouldn't break things
                # either (see bug #285)
                self.set_cookie("unicode_args", "blah", domain=u("foo.com"),
                                path=u("/foo"))

        class SetCookieSpecialCharHandler(RequestHandler):
            def get(self):
                self.set_cookie("equals", "a=b")
                self.set_cookie("semicolon", "a;b")
                self.set_cookie("quote", 'a"b')

        class SetCookieOverwriteHandler(RequestHandler):
            def get(self):
                self.set_cookie("a", "b", domain="example.com")
                self.set_cookie("c", "d", domain="example.com")
                # A second call with the same name clobbers the first.
                # Attributes from the first call are not carried over.
                self.set_cookie("a", "e")

        return [("/set", SetCookieHandler),
                ("/get", GetCookieHandler),
                ("/set_domain", SetCookieDomainHandler),
                ("/special_char", SetCookieSpecialCharHandler),
                ("/set_overwrite", SetCookieOverwriteHandler),
                ]

    def test_set_cookie(self):
        response = self.fetch("/set")
        self.assertEqual(sorted(response.headers.get_list("Set-Cookie")),
                         ["bytes=zxcv; Path=/",
                          "str=asdf; Path=/",
                          "unicode=qwer; Path=/",
                          ])

    def test_get_cookie(self):
        response = self.fetch("/get", headers={"Cookie": "foo=bar"})
        self.assertEqual(response.body, b"bar")

        response = self.fetch("/get", headers={"Cookie": 'foo="bar"'})
        self.assertEqual(response.body, b"bar")

        # A malformed Cookie header falls back to the default value.
        response = self.fetch("/get", headers={"Cookie": "/=exception;"})
        self.assertEqual(response.body, b"default")

    def test_set_cookie_domain(self):
        response = self.fetch("/set_domain")
        self.assertEqual(response.headers.get_list("Set-Cookie"),
                         ["unicode_args=blah; Domain=foo.com; Path=/foo"])

    def test_cookie_special_char(self):
        response = self.fetch("/special_char")
        headers = sorted(response.headers.get_list("Set-Cookie"))
        self.assertEqual(len(headers), 3)
        self.assertEqual(headers[0], 'equals="a=b"; Path=/')
        self.assertEqual(headers[1], 'quote="a\\"b"; Path=/')
        # python 2.7 octal-escapes the semicolon; older versions leave it alone
        self.assertTrue(headers[2] in ('semicolon="a;b"; Path=/',
                                       'semicolon="a\\073b"; Path=/'),
                        headers[2])

        # Round-trip: each quoted header form parses back to the raw value.
        data = [('foo=a=b', 'a=b'),
                ('foo="a=b"', 'a=b'),
                ('foo="a;b"', 'a;b'),
                # ('foo=a\\073b', 'a;b'), # even encoded, ";" is a delimiter
                ('foo="a\\073b"', 'a;b'),
                ('foo="a\\"b"', 'a"b'),
                ]
        for header, expected in data:
            logging.debug("trying %r", header)
            response = self.fetch("/get", headers={"Cookie": header})
            self.assertEqual(response.body, utf8(expected))

    def test_set_cookie_overwrite(self):
        response = self.fetch("/set_overwrite")
        headers = response.headers.get_list("Set-Cookie")
        self.assertEqual(sorted(headers),
                         ["a=e; Path=/", "c=d; Domain=example.com; Path=/"])
class AuthRedirectRequestHandler(RequestHandler):
    """Handler guarded by @authenticated, with a configurable login url."""

    def initialize(self, login_url):
        self.login_url = login_url

    def get_login_url(self):
        # Consulted by @authenticated to build the redirect target.
        return self.login_url

    @authenticated
    def get(self):
        # we'll never actually get here because the test doesn't follow
        # redirects (the unauthenticated request is redirected first)
        self.send_error(500)
class AuthRedirectTest(WebTestCase):
    """Checks @authenticated redirects for relative and absolute login urls."""

    def get_handlers(self):
        return [('/relative', AuthRedirectRequestHandler,
                 dict(login_url='/login')),
                ('/absolute', AuthRedirectRequestHandler,
                 dict(login_url='http://example.com/login'))]

    def test_relative_auth_redirect(self):
        self.http_client.fetch(self.get_url('/relative'), self.stop,
                               follow_redirects=False)
        response = self.wait()
        self.assertEqual(response.code, 302)
        # ``next`` carries the url-escaped original path.
        self.assertEqual(response.headers['Location'],
                         '/login?next=%2Frelative')

    def test_absolute_auth_redirect(self):
        self.http_client.fetch(self.get_url('/absolute'), self.stop,
                               follow_redirects=False)
        response = self.wait()
        self.assertEqual(response.code, 302)
        # FIX: raw string for the regex -- the original non-raw '\?' is an
        # invalid escape sequence (DeprecationWarning, later SyntaxWarning).
        # The resulting pattern bytes are identical.
        self.assertTrue(re.match(
            r'http://example.com/login\?next=http%3A%2F%2Flocalhost%3A[0-9]+%2Fabsolute',
            response.headers['Location']), response.headers['Location'])
class ConnectionCloseHandler(RequestHandler):
    """Never finishes its request; lets the test observe on_connection_close."""

    def initialize(self, test):
        self.test = test

    @asynchronous
    def get(self):
        # Deliberately never calls finish(); the test closes the socket.
        self.test.on_handler_waiting()

    def on_connection_close(self):
        self.test.on_connection_close()
class ConnectionCloseTest(WebTestCase):
    """Verifies on_connection_close fires when the client disconnects."""

    def get_handlers(self):
        return [('/', ConnectionCloseHandler, dict(test=self))]

    def test_connection_close(self):
        # Speak raw HTTP over a plain socket so we can close it mid-request.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        s.connect(("localhost", self.get_http_port()))
        self.stream = IOStream(s, io_loop=self.io_loop)
        self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
        # Waits until on_connection_close() calls self.stop().
        self.wait()

    def on_handler_waiting(self):
        # Called by the handler once the request is in flight; drop the
        # connection to trigger on_connection_close on the server side.
        logging.debug('handler waiting')
        self.stream.close()

    def on_connection_close(self):
        logging.debug('connection closed')
        self.stop()
class EchoHandler(RequestHandler):
    """Echoes path and arguments back as JSON, verifying their types first."""

    def get(self, *path_args):
        # Type checks: web.py interfaces convert argument values to
        # unicode strings (by default, but see also decode_argument).
        # In httpserver.py (i.e. self.request.arguments), they're left
        # as bytes.  Keys are always native strings.
        for key in self.request.arguments:
            if type(key) != str:
                raise Exception("incorrect type for key: %r" % type(key))
            for value in self.request.arguments[key]:
                if type(value) != bytes_type:
                    raise Exception("incorrect type for value: %r" %
                                    type(value))
            for value in self.get_arguments(key):
                if type(value) != unicode_type:
                    raise Exception("incorrect type for value: %r" %
                                    type(value))
        for arg in path_args:
            if type(arg) != unicode_type:
                raise Exception("incorrect type for path arg: %r" % type(arg))
        self.write(dict(path=self.request.path,
                        path_args=path_args,
                        args=recursive_unicode(self.request.arguments)))
class RequestEncodingTest(WebTestCase):
    """Tests url-escaping behavior of path components and query arguments."""

    def get_handlers(self):
        return [("/group/(.*)", EchoHandler),
                ("/slashes/([^/]*)/([^/]*)", EchoHandler),
                ]

    def fetch_json(self, path):
        # Fetch and JSON-decode the echo handler's response body.
        return json_decode(self.fetch(path).body)

    def test_group_question_mark(self):
        # Ensure that url-encoded question marks are handled properly
        self.assertEqual(self.fetch_json('/group/%3F'),
                         dict(path='/group/%3F', path_args=['?'], args={}))
        self.assertEqual(self.fetch_json('/group/%3F?%3F=%3F'),
                         dict(path='/group/%3F', path_args=['?'],
                              args={'?': ['?']}))

    def test_group_encoding(self):
        # Path components and query arguments should be decoded the same way
        self.assertEqual(self.fetch_json('/group/%C3%A9?arg=%C3%A9'),
                         {u("path"): u("/group/%C3%A9"),
                          u("path_args"): [u("\u00e9")],
                          u("args"): {u("arg"): [u("\u00e9")]}})

    def test_slashes(self):
        # Slashes may be escaped to appear as a single "directory" in the path,
        # but they are then unescaped when passed to the get() method.
        self.assertEqual(self.fetch_json('/slashes/foo/bar'),
                         dict(path="/slashes/foo/bar",
                              path_args=["foo", "bar"],
                              args={}))
        self.assertEqual(self.fetch_json('/slashes/a%2Fb/c%2Fd'),
                         dict(path="/slashes/a%2Fb/c%2Fd",
                              path_args=["a/b", "c/d"],
                              args={}))
class TypeCheckHandler(RequestHandler):
    """Records a type error for each request attribute of unexpected type.

    The response body is a dict of {name: error message}; an empty dict
    means every check passed.
    """

    def prepare(self):
        self.errors = {}
        self.check_type('status', self.get_status(), int)
        # get_argument is an exception from the general rule of using
        # type str for non-body data mainly for historical reasons.
        self.check_type('argument', self.get_argument('foo'), unicode_type)
        self.check_type('cookie_key', list(self.cookies.keys())[0], str)
        self.check_type('cookie_value',
                        list(self.cookies.values())[0].value, str)
        # Secure cookies return bytes because they can contain arbitrary
        # data, but regular cookies are native strings.
        if list(self.cookies.keys()) != ['asdf']:
            raise Exception("unexpected values for cookie keys: %r" %
                            self.cookies.keys())
        self.check_type('get_secure_cookie',
                        self.get_secure_cookie('asdf'), bytes_type)
        self.check_type('get_cookie', self.get_cookie('asdf'), str)

        self.check_type('xsrf_token', self.xsrf_token, bytes_type)
        self.check_type('xsrf_form_html', self.xsrf_form_html(), str)

        self.check_type('reverse_url', self.reverse_url('typecheck', 'foo'),
                        str)

        self.check_type('request_summary', self._request_summary(), str)

    def get(self, path_component):
        # path_component uses type unicode instead of str for consistency
        # with get_argument()
        self.check_type('path_component', path_component, unicode_type)
        self.write(self.errors)

    def post(self, path_component):
        self.check_type('path_component', path_component, unicode_type)
        self.write(self.errors)

    def check_type(self, name, obj, expected_type):
        # Record rather than raise, so all failures show up in one response.
        actual_type = type(obj)
        if expected_type != actual_type:
            self.errors[name] = "expected %s, got %s" % (expected_type,
                                                         actual_type)
class DecodeArgHandler(RequestHandler):
    """Echoes path/query args, optionally decoding with a requested charset."""

    def decode_argument(self, value, name=None):
        if type(value) != bytes_type:
            raise Exception("unexpected type for value: %r" % type(value))
        # use self.request.arguments directly to avoid recursion
        if 'encoding' in self.request.arguments:
            return value.decode(
                to_unicode(self.request.arguments['encoding'][0]))
        else:
            # No encoding requested: pass the raw bytes through.
            return value

    def get(self, arg):
        def describe(s):
            # Report both the value's type and a readable rendering of it.
            if type(s) == bytes_type:
                return ["bytes", native_str(binascii.b2a_hex(s))]
            elif type(s) == unicode_type:
                return ["unicode", s]
            raise Exception("unknown type")
        self.write({'path': describe(arg),
                    'query': describe(self.get_argument("foo")),
                    })
class LinkifyHandler(RequestHandler):
    """Renders a template that uses the built-in linkify UI module."""

    def get(self):
        self.render("linkify.html", message="http://example.com")
class UIModuleResourceHandler(RequestHandler):
    """Renders a page whose UI modules inject css/js resources."""

    def get(self):
        self.render("page.html", entries=[1, 2])
class OptionalPathHandler(RequestHandler):
    """Echoes an optional path group; the group may be None when absent."""

    def get(self, path):
        self.write({"path": path})
class FlowControlHandler(RequestHandler):
    # These writes are too small to demonstrate real flow control,
    # but at least it shows that the callbacks get run.
    @asynchronous
    def get(self):
        self.write("1")
        # Each flush callback continues the chain once the write drains.
        self.flush(callback=self.step2)

    def step2(self):
        self.write("2")
        self.flush(callback=self.step3)

    def step3(self):
        self.write("3")
        self.finish()
class MultiHeaderHandler(RequestHandler):
    """Exercises set_header (case-insensitive overwrite) vs add_header."""

    def get(self):
        self.set_header("x-overwrite", "1")
        self.set_header("X-Overwrite", 2)  # overwrites despite case difference
        self.add_header("x-multi", 3)
        self.add_header("X-Multi", "4")  # appends a second value
class RedirectHandler(RequestHandler):
    """Issues a redirect whose kind is chosen by query arguments."""

    def get(self):
        permanent = self.get_argument('permanent', None)
        status = self.get_argument('status', None)
        if permanent is not None:
            # ?permanent=1 -> 301, ?permanent=0 -> 302.
            self.redirect('/', permanent=int(permanent))
        elif status is not None:
            # Explicit status code, e.g. ?status=307.
            self.redirect('/', status=int(status))
        else:
            raise Exception("didn't get permanent or status arguments")
class EmptyFlushCallbackHandler(RequestHandler):
    """Mixes empty and non-empty flushes; every callback must still fire."""

    @gen.engine
    @asynchronous
    def get(self):
        # Ensure that the flush callback is run whether or not there
        # was any output.
        yield gen.Task(self.flush)  # "empty" flush, but writes headers
        yield gen.Task(self.flush)  # empty flush
        self.write("o")
        yield gen.Task(self.flush)  # flushes the "o"
        yield gen.Task(self.flush)  # empty flush
        self.finish("k")
class HeaderInjectionHandler(RequestHandler):
    """set_header must reject values containing CR/LF (header injection)."""

    def get(self):
        try:
            self.set_header("X-Foo", "foo\r\nX-Bar: baz")
            raise Exception("Didn't get expected exception")
        except ValueError as e:
            # Only the specific "unsafe value" rejection counts as success.
            if "Unsafe header value" in str(e):
                self.finish(b"ok")
            else:
                raise
class GetArgumentHandler(RequestHandler):
    """Returns argument "foo" via the accessor chosen by ?source=."""

    def prepare(self):
        source = self.get_argument('source', None)
        if source == 'query':
            getter = self.get_query_argument
        elif source == 'body':
            getter = self.get_body_argument
        else:
            # Default accessor merges query and body arguments.
            getter = self.get_argument
        self.finish(getter("foo", "default"))
class GetArgumentsHandler(RequestHandler):
    """Returns all three plural accessors for "foo" as a JSON dict."""

    def prepare(self):
        self.finish(dict(default=self.get_arguments("foo"),
                         query=self.get_query_arguments("foo"),
                         body=self.get_body_arguments("foo")))
# This test is shared with wsgi_test.py
@wsgi_safe
class WSGISafeWebTest(WebTestCase):
    """Core RequestHandler behavior tests that are safe to run under WSGI."""

    COOKIE_SECRET = "WebTest.COOKIE_SECRET"

    def get_app_kwargs(self):
        # In-memory templates exercising UI modules and resource injection.
        loader = DictLoader({
            "linkify.html": "{% module linkify(message) %}",
            "page.html": """\
<html><head></head><body>
{% for e in entries %}
{% module Template("entry.html", entry=e) %}
{% end %}
</body></html>""",
            "entry.html": """\
{{ set_resources(embedded_css=".entry { margin-bottom: 1em; }", embedded_javascript="js_embed()", css_files=["/base.css", "/foo.css"], javascript_files="/common.js", html_head="<meta>", html_body='<script src="/analytics.js"/>') }}
<div class="entry">...</div>""",
        })
        return dict(template_loader=loader,
                    autoescape="xhtml_escape",
                    cookie_secret=self.COOKIE_SECRET)

    def tearDown(self):
        super(WSGISafeWebTest, self).tearDown()
        # Clear the class-level template cache so other tests don't see
        # this test's DictLoader.
        RequestHandler._template_loaders.clear()

    def get_handlers(self):
        urls = [
            url("/typecheck/(.*)", TypeCheckHandler, name='typecheck'),
            url("/decode_arg/(.*)", DecodeArgHandler, name='decode_arg'),
            url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler),
            url("/linkify", LinkifyHandler),
            url("/uimodule_resources", UIModuleResourceHandler),
            url("/optional_path/(.+)?", OptionalPathHandler),
            url("/multi_header", MultiHeaderHandler),
            url("/redirect", RedirectHandler),
            url("/header_injection", HeaderInjectionHandler),
            url("/get_argument", GetArgumentHandler),
            url("/get_arguments", GetArgumentsHandler),
        ]
        return urls

    def fetch_json(self, *args, **kwargs):
        response = self.fetch(*args, **kwargs)
        response.rethrow()
        return json_decode(response.body)

    def test_types(self):
        cookie_value = to_unicode(create_signed_value(self.COOKIE_SECRET,
                                                      "asdf", "qwer"))
        response = self.fetch("/typecheck/asdf?foo=bar",
                              headers={"Cookie": "asdf=" + cookie_value})
        data = json_decode(response.body)
        # An empty dict means TypeCheckHandler recorded no type errors.
        self.assertEqual(data, {})

        response = self.fetch("/typecheck/asdf?foo=bar", method="POST",
                              headers={"Cookie": "asdf=" + cookie_value},
                              body="foo=bar")

    def test_decode_argument(self):
        # These urls all decode to the same thing
        urls = ["/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8",
                "/decode_arg/%E9?foo=%E9&encoding=latin1",
                "/decode_arg_kw/%E9?foo=%E9&encoding=latin1",
                ]
        for url in urls:
            response = self.fetch(url)
            response.rethrow()
            data = json_decode(response.body)
            self.assertEqual(data, {u('path'): [u('unicode'), u('\u00e9')],
                                    u('query'): [u('unicode'), u('\u00e9')],
                                    })

        # Without an explicit encoding, raw bytes are passed through.
        response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
        response.rethrow()
        data = json_decode(response.body)
        self.assertEqual(data, {u('path'): [u('bytes'), u('c3a9')],
                                u('query'): [u('bytes'), u('c3a9')],
                                })

    def test_decode_argument_plus(self):
        # These urls are all equivalent.
        urls = ["/decode_arg/1%20%2B%201?foo=1%20%2B%201&encoding=utf-8",
                "/decode_arg/1%20+%201?foo=1+%2B+1&encoding=utf-8"]
        for url in urls:
            response = self.fetch(url)
            response.rethrow()
            data = json_decode(response.body)
            self.assertEqual(data, {u('path'): [u('unicode'), u('1 + 1')],
                                    u('query'): [u('unicode'), u('1 + 1')],
                                    })

    def test_reverse_url(self):
        self.assertEqual(self.app.reverse_url('decode_arg', 'foo'),
                         '/decode_arg/foo')
        self.assertEqual(self.app.reverse_url('decode_arg', 42),
                         '/decode_arg/42')
        self.assertEqual(self.app.reverse_url('decode_arg', b'\xe9'),
                         '/decode_arg/%E9')
        self.assertEqual(self.app.reverse_url('decode_arg', u('\u00e9')),
                         '/decode_arg/%C3%A9')
        self.assertEqual(self.app.reverse_url('decode_arg', '1 + 1'),
                         '/decode_arg/1%20%2B%201')

    def test_uimodule_unescaped(self):
        # Module output is injected without autoescaping.
        response = self.fetch("/linkify")
        self.assertEqual(response.body,
                         b"<a href=\"http://example.com\">http://example.com</a>")

    def test_uimodule_resources(self):
        response = self.fetch("/uimodule_resources")
        # NOTE(review): the blank lines in the expected body come from the
        # template's {% for %} control lines; the whitespace is significant.
        self.assertEqual(response.body, b"""\
<html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/>
<style type="text/css">
.entry { margin-bottom: 1em; }
</style>
<meta>
</head><body>


<div class="entry">...</div>


<div class="entry">...</div>

<script src="/common.js" type="text/javascript"></script>
<script type="text/javascript">
//<![CDATA[
js_embed()
//]]>
</script>
<script src="/analytics.js"/>
</body></html>""")

    def test_optional_path(self):
        self.assertEqual(self.fetch_json("/optional_path/foo"),
                         {u("path"): u("foo")})
        # With nothing after the slash, the optional group is None.
        self.assertEqual(self.fetch_json("/optional_path/"),
                         {u("path"): None})

    def test_multi_header(self):
        response = self.fetch("/multi_header")
        self.assertEqual(response.headers["x-overwrite"], "2")
        self.assertEqual(response.headers.get_list("x-multi"), ["3", "4"])

    def test_redirect(self):
        response = self.fetch("/redirect?permanent=1", follow_redirects=False)
        self.assertEqual(response.code, 301)
        response = self.fetch("/redirect?permanent=0", follow_redirects=False)
        self.assertEqual(response.code, 302)
        response = self.fetch("/redirect?status=307", follow_redirects=False)
        self.assertEqual(response.code, 307)

    def test_header_injection(self):
        response = self.fetch("/header_injection")
        self.assertEqual(response.body, b"ok")

    def test_get_argument(self):
        response = self.fetch("/get_argument?foo=bar")
        self.assertEqual(response.body, b"bar")
        response = self.fetch("/get_argument?foo=")
        self.assertEqual(response.body, b"")
        response = self.fetch("/get_argument")
        self.assertEqual(response.body, b"default")

        # Test merging of query and body arguments.
        # In singular form, body arguments take precedence over query arguments.
        body = urllib_parse.urlencode(dict(foo="hello"))
        response = self.fetch("/get_argument?foo=bar", method="POST",
                              body=body)
        self.assertEqual(response.body, b"hello")
        # In plural methods they are merged.
        response = self.fetch("/get_arguments?foo=bar",
                              method="POST", body=body)
        self.assertEqual(json_decode(response.body),
                         dict(default=['bar', 'hello'],
                              query=['bar'],
                              body=['hello']))

    def test_get_query_arguments(self):
        # send as a post so we can ensure the separation between query
        # string and body arguments.
        body = urllib_parse.urlencode(dict(foo="hello"))
        response = self.fetch("/get_argument?source=query&foo=bar",
                              method="POST", body=body)
        self.assertEqual(response.body, b"bar")
        response = self.fetch("/get_argument?source=query&foo=",
                              method="POST", body=body)
        self.assertEqual(response.body, b"")
        response = self.fetch("/get_argument?source=query",
                              method="POST", body=body)
        self.assertEqual(response.body, b"default")

    def test_get_body_arguments(self):
        body = urllib_parse.urlencode(dict(foo="bar"))
        response = self.fetch("/get_argument?source=body&foo=hello",
                              method="POST", body=body)
        self.assertEqual(response.body, b"bar")

        body = urllib_parse.urlencode(dict(foo=""))
        response = self.fetch("/get_argument?source=body&foo=hello",
                              method="POST", body=body)
        self.assertEqual(response.body, b"")

        body = urllib_parse.urlencode(dict())
        response = self.fetch("/get_argument?source=body&foo=hello",
                              method="POST", body=body)
        self.assertEqual(response.body, b"default")

    def test_no_gzip(self):
        # Plain responses must not advertise or apply gzip encoding.
        response = self.fetch('/get_argument')
        self.assertNotIn('Accept-Encoding', response.headers.get('Vary', ''))
        self.assertNotIn('gzip', response.headers.get('Content-Encoding', ''))
class NonWSGIWebTests(WebTestCase):
    """Tests relying on flush callbacks, which WSGI mode cannot support."""

    def get_handlers(self):
        return [("/flow_control", FlowControlHandler),
                ("/empty_flush", EmptyFlushCallbackHandler),
                ]

    def test_flow_control(self):
        self.assertEqual(self.fetch("/flow_control").body, b"123")

    def test_empty_flush(self):
        response = self.fetch("/empty_flush")
        self.assertEqual(response.body, b"ok")
@wsgi_safe
class ErrorResponseTest(WebTestCase):
    """Covers default error pages, write_error, and legacy get_error_html."""

    def get_handlers(self):
        class DefaultHandler(RequestHandler):
            def get(self):
                if self.get_argument("status", None):
                    raise HTTPError(int(self.get_argument("status")))
                1 / 0  # uncaught -> 500 via the default error page

        class WriteErrorHandler(RequestHandler):
            def get(self):
                if self.get_argument("status", None):
                    self.send_error(int(self.get_argument("status")))
                else:
                    1 / 0

            def write_error(self, status_code, **kwargs):
                self.set_header("Content-Type", "text/plain")
                # exc_info is present only when an exception triggered
                # the error (not for explicit send_error calls).
                if "exc_info" in kwargs:
                    self.write("Exception: %s" %
                               kwargs["exc_info"][0].__name__)
                else:
                    self.write("Status: %d" % status_code)

        class GetErrorHtmlHandler(RequestHandler):
            def get(self):
                if self.get_argument("status", None):
                    self.send_error(int(self.get_argument("status")))
                else:
                    1 / 0

            def get_error_html(self, status_code, **kwargs):
                # Legacy (pre-write_error) error-page interface.
                self.set_header("Content-Type", "text/plain")
                if "exception" in kwargs:
                    self.write("Exception: %s" % sys.exc_info()[0].__name__)
                else:
                    self.write("Status: %d" % status_code)

        class FailedWriteErrorHandler(RequestHandler):
            def get(self):
                1 / 0

            def write_error(self, status_code, **kwargs):
                # A failure inside the error handler should produce an
                # empty 500 body rather than a cascade.
                raise Exception("exception in write_error")

        return [url("/default", DefaultHandler),
                url("/write_error", WriteErrorHandler),
                url("/get_error_html", GetErrorHtmlHandler),
                url("/failed_write_error", FailedWriteErrorHandler),
                ]

    def test_default(self):
        with ExpectLog(app_log, "Uncaught exception"):
            response = self.fetch("/default")
            self.assertEqual(response.code, 500)
            self.assertTrue(b"500: Internal Server Error" in response.body)

        response = self.fetch("/default?status=503")
        self.assertEqual(response.code, 503)
        self.assertTrue(b"503: Service Unavailable" in response.body)

    def test_write_error(self):
        with ExpectLog(app_log, "Uncaught exception"):
            response = self.fetch("/write_error")
            self.assertEqual(response.code, 500)
            self.assertEqual(b"Exception: ZeroDivisionError", response.body)

        response = self.fetch("/write_error?status=503")
        self.assertEqual(response.code, 503)
        self.assertEqual(b"Status: 503", response.body)

    def test_get_error_html(self):
        with ExpectLog(app_log, "Uncaught exception"):
            response = self.fetch("/get_error_html")
            self.assertEqual(response.code, 500)
            self.assertEqual(b"Exception: ZeroDivisionError", response.body)

        response = self.fetch("/get_error_html?status=503")
        self.assertEqual(response.code, 503)
        self.assertEqual(b"Status: 503", response.body)

    def test_failed_write_error(self):
        with ExpectLog(app_log, "Uncaught exception"):
            response = self.fetch("/failed_write_error")
            self.assertEqual(response.code, 500)
            self.assertEqual(b"", response.body)
@wsgi_safe
class StaticFileTest(WebTestCase):
    """Tests for StaticFileHandler: static_url generation (versioned,
    absolute, and include_host overrides), conditional-request caching
    headers (If-Modified-Since / If-None-Match / Etag), and HTTP Range
    handling including edge and error cases.
    """
    # The expected MD5 hash of robots.txt, used in tests that call
    # StaticFileHandler.get_version
    robots_txt_hash = b"f71d20196d4caf35b6a670db8c70b03d"
    static_dir = os.path.join(os.path.dirname(__file__), 'static')

    def get_handlers(self):
        class StaticUrlHandler(RequestHandler):
            # Echoes static_url() for the requested path; ?include_version=0
            # suppresses the ?v= hash query argument.
            def get(self, path):
                with_v = int(self.get_argument('include_version', 1))
                self.write(self.static_url(path, include_version=with_v))

        class AbsoluteStaticUrlHandler(StaticUrlHandler):
            include_host = True

        class OverrideStaticUrlHandler(RequestHandler):
            # Verifies that the include_host keyword argument to
            # static_url() overrides the instance attribute.  Writes
            # "True" when exactly the overridden URL carries the
            # protocol prefix.
            def get(self, path):
                do_include = bool(self.get_argument("include_host"))
                # Deliberately set the attribute to the opposite of the
                # keyword argument so the override is observable.
                self.include_host = not do_include

                regular_url = self.static_url(path)
                override_url = self.static_url(path, include_host=do_include)
                if override_url == regular_url:
                    return self.write(str(False))

                protocol = self.request.protocol + "://"
                protocol_length = len(protocol)
                # str.find limited to the prefix: 0 means "starts with
                # protocol", -1 means it does not.
                check_regular = regular_url.find(protocol, 0, protocol_length)
                check_override = override_url.find(protocol, 0, protocol_length)

                if do_include:
                    result = (check_override == 0 and check_regular == -1)
                else:
                    result = (check_override == -1 and check_regular == 0)
                self.write(str(result))

        return [('/static_url/(.*)', StaticUrlHandler),
                ('/abs_static_url/(.*)', AbsoluteStaticUrlHandler),
                ('/override_static_url/(.*)', OverrideStaticUrlHandler)]

    def get_app_kwargs(self):
        return dict(static_path=relpath('static'))

    def test_static_files(self):
        # The file is reachable both at the root and under /static/.
        response = self.fetch('/robots.txt')
        self.assertTrue(b"Disallow: /" in response.body)
        response = self.fetch('/static/robots.txt')
        self.assertTrue(b"Disallow: /" in response.body)

    def test_static_url(self):
        response = self.fetch("/static_url/robots.txt")
        self.assertEqual(response.body,
                         b"/static/robots.txt?v=" + self.robots_txt_hash)

    def test_absolute_static_url(self):
        response = self.fetch("/abs_static_url/robots.txt")
        self.assertEqual(response.body, (
            utf8(self.get_url("/")) +
            b"static/robots.txt?v=" +
            self.robots_txt_hash
        ))

    def test_relative_version_exclusion(self):
        response = self.fetch("/static_url/robots.txt?include_version=0")
        self.assertEqual(response.body, b"/static/robots.txt")

    def test_absolute_version_exclusion(self):
        response = self.fetch("/abs_static_url/robots.txt?include_version=0")
        self.assertEqual(response.body,
                         utf8(self.get_url("/") + "static/robots.txt"))

    def test_include_host_override(self):
        self._trigger_include_host_check(False)
        self._trigger_include_host_check(True)

    def _trigger_include_host_check(self, include_host):
        # Helper for test_include_host_override: the handler writes "True"
        # only when the override behaved correctly.
        path = "/override_static_url/robots.txt?include_host=%s"
        response = self.fetch(path % int(include_host))
        self.assertEqual(response.body, utf8(str(True)))

    def test_static_304_if_modified_since(self):
        response1 = self.fetch("/static/robots.txt")
        response2 = self.fetch("/static/robots.txt", headers={
            'If-Modified-Since': response1.headers['Last-Modified']})
        self.assertEqual(response2.code, 304)
        # 304 responses must not repeat entity headers.
        self.assertTrue('Content-Length' not in response2.headers)
        self.assertTrue('Last-Modified' not in response2.headers)

    def test_static_304_if_none_match(self):
        response1 = self.fetch("/static/robots.txt")
        response2 = self.fetch("/static/robots.txt", headers={
            'If-None-Match': response1.headers['Etag']})
        self.assertEqual(response2.code, 304)

    def test_static_if_modified_since_pre_epoch(self):
        # On windows, the functions that work with time_t do not accept
        # negative values, and at least one client (processing.js) seems
        # to use if-modified-since 1/1/1960 as a cache-busting technique.
        response = self.fetch("/static/robots.txt", headers={
            'If-Modified-Since': 'Fri, 01 Jan 1960 00:00:00 GMT'})
        self.assertEqual(response.code, 200)

    def test_static_if_modified_since_time_zone(self):
        # Instead of the value from Last-Modified, make requests with times
        # chosen just before and after the known modification time
        # of the file to ensure that the right time zone is being used
        # when parsing If-Modified-Since.
        stat = os.stat(relpath('static/robots.txt'))
        response = self.fetch('/static/robots.txt', headers={
            'If-Modified-Since': format_timestamp(stat.st_mtime - 1)})
        self.assertEqual(response.code, 200)
        response = self.fetch('/static/robots.txt', headers={
            'If-Modified-Since': format_timestamp(stat.st_mtime + 1)})
        self.assertEqual(response.code, 304)

    def test_static_etag(self):
        response = self.fetch('/static/robots.txt')
        self.assertEqual(utf8(response.headers.get("Etag")),
                         b'"' + self.robots_txt_hash + b'"')

    def test_static_with_range(self):
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=0-9'})
        self.assertEqual(response.code, 206)
        self.assertEqual(response.body, b"User-agent")
        self.assertEqual(utf8(response.headers.get("Etag")),
                         b'"' + self.robots_txt_hash + b'"')
        self.assertEqual(response.headers.get("Content-Length"), "10")
        self.assertEqual(response.headers.get("Content-Range"),
                         "bytes 0-9/26")

    def test_static_with_range_full_file(self):
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=0-'})
        # Note: Chrome refuses to play audio if it gets an HTTP 206 in response
        # to ``Range: bytes=0-`` :(
        self.assertEqual(response.code, 200)
        robots_file_path = os.path.join(self.static_dir, "robots.txt")
        with open(robots_file_path) as f:
            self.assertEqual(response.body, utf8(f.read()))
        self.assertEqual(response.headers.get("Content-Length"), "26")
        self.assertEqual(response.headers.get("Content-Range"), None)

    def test_static_with_range_full_past_end(self):
        # A range ending past EOF still yields the whole file as a 200.
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=0-10000000'})
        self.assertEqual(response.code, 200)
        robots_file_path = os.path.join(self.static_dir, "robots.txt")
        with open(robots_file_path) as f:
            self.assertEqual(response.body, utf8(f.read()))
        self.assertEqual(response.headers.get("Content-Length"), "26")
        self.assertEqual(response.headers.get("Content-Range"), None)

    def test_static_with_range_partial_past_end(self):
        # A partial range past EOF is clamped to the actual file length.
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=1-10000000'})
        self.assertEqual(response.code, 206)
        robots_file_path = os.path.join(self.static_dir, "robots.txt")
        with open(robots_file_path) as f:
            self.assertEqual(response.body, utf8(f.read()[1:]))
        self.assertEqual(response.headers.get("Content-Length"), "25")
        self.assertEqual(response.headers.get("Content-Range"), "bytes 1-25/26")

    def test_static_with_range_end_edge(self):
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=22-'})
        self.assertEqual(response.body, b": /\n")
        self.assertEqual(response.headers.get("Content-Length"), "4")
        self.assertEqual(response.headers.get("Content-Range"),
                         "bytes 22-25/26")

    def test_static_with_range_neg_end(self):
        # Suffix ranges (bytes=-N) return the last N bytes.
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=-4'})
        self.assertEqual(response.body, b": /\n")
        self.assertEqual(response.headers.get("Content-Length"), "4")
        self.assertEqual(response.headers.get("Content-Range"),
                         "bytes 22-25/26")

    def test_static_invalid_range(self):
        # A syntactically invalid Range header is ignored entirely.
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'asdf'})
        self.assertEqual(response.code, 200)

    def test_static_unsatisfiable_range_zero_suffix(self):
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=-0'})
        self.assertEqual(response.headers.get("Content-Range"),
                         "bytes */26")
        self.assertEqual(response.code, 416)

    def test_static_unsatisfiable_range_invalid_start(self):
        # Start offset equal to the file size (26) is unsatisfiable.
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=26'})
        self.assertEqual(response.code, 416)
        self.assertEqual(response.headers.get("Content-Range"),
                         "bytes */26")

    def test_static_head(self):
        response = self.fetch('/static/robots.txt', method='HEAD')
        self.assertEqual(response.code, 200)
        # No body was returned, but we did get the right content length.
        self.assertEqual(response.body, b'')
        self.assertEqual(response.headers['Content-Length'], '26')
        self.assertEqual(utf8(response.headers['Etag']),
                         b'"' + self.robots_txt_hash + b'"')

    def test_static_head_range(self):
        response = self.fetch('/static/robots.txt', method='HEAD',
                              headers={'Range': 'bytes=1-4'})
        self.assertEqual(response.code, 206)
        self.assertEqual(response.body, b'')
        self.assertEqual(response.headers['Content-Length'], '4')
        self.assertEqual(utf8(response.headers['Etag']),
                         b'"' + self.robots_txt_hash + b'"')

    def test_static_range_if_none_match(self):
        # If-None-Match takes precedence over Range: a matching Etag
        # yields 304, not a partial response.
        response = self.fetch('/static/robots.txt', headers={
            'Range': 'bytes=1-4',
            'If-None-Match': b'"' + self.robots_txt_hash + b'"'})
        self.assertEqual(response.code, 304)
        self.assertEqual(response.body, b'')
        self.assertTrue('Content-Length' not in response.headers)
        self.assertEqual(utf8(response.headers['Etag']),
                         b'"' + self.robots_txt_hash + b'"')

    def test_static_404(self):
        response = self.fetch('/static/blarg')
        self.assertEqual(response.code, 404)
@wsgi_safe
class StaticDefaultFilenameTest(WebTestCase):
    """Tests the ``default_filename`` option of StaticFileHandler:
    directory URLs serve the index file, and directory paths without a
    trailing slash redirect to the slashed form."""
    def get_app_kwargs(self):
        return dict(static_path=relpath('static'),
                    static_handler_args=dict(default_filename='index.html'))

    def get_handlers(self):
        return []

    def test_static_default_filename(self):
        response = self.fetch('/static/dir/', follow_redirects=False)
        self.assertEqual(response.code, 200)
        self.assertEqual(b'this is the index\n', response.body)

    def test_static_default_redirect(self):
        # Permanent redirect adds the trailing slash.
        response = self.fetch('/static/dir', follow_redirects=False)
        self.assertEqual(response.code, 301)
        self.assertTrue(response.headers['Location'].endswith('/static/dir/'))
@wsgi_safe
class StaticFileWithPathTest(WebTestCase):
    """A StaticFileHandler mapped explicitly in the handler list may
    serve from a different directory ("path" init argument) than the
    application-wide static_path."""
    def get_app_kwargs(self):
        return dict(static_path=relpath('static'),
                    static_handler_args=dict(default_filename='index.html'))

    def get_handlers(self):
        return [("/foo/(.*)", StaticFileHandler, {
            "path": relpath("templates/"),
        })]

    def test_serve(self):
        # utf8.html lives under templates/, not static/.
        response = self.fetch("/foo/utf8.html")
        self.assertEqual(response.body, b"H\xc3\xa9llo\n")
@wsgi_safe
class CustomStaticFileTest(WebTestCase):
    """Exercises the overridable classmethod/instance hooks of
    StaticFileHandler (make_static_url, parse_url_path,
    get_absolute_path, validate_absolute_path, get_content,
    get_modified_time, get_version) via a subclass that embeds the
    version hash in the filename instead of a query argument."""
    def get_handlers(self):
        class MyStaticFileHandler(StaticFileHandler):
            @classmethod
            def make_static_url(cls, settings, path):
                # Rewrites "foo.txt" as "/static/foo.<version>.txt".
                version_hash = cls.get_version(settings, path)
                extension_index = path.rindex('.')
                before_version = path[:extension_index]
                after_version = path[(extension_index + 1):]
                return '/static/%s.%s.%s' % (before_version, version_hash,
                                             after_version)

            def parse_url_path(self, url_path):
                # Inverse of make_static_url: strips the embedded version
                # segment back out of the request path.
                extension_index = url_path.rindex('.')
                version_index = url_path.rindex('.', 0, extension_index)
                return '%s%s' % (url_path[:version_index],
                                 url_path[extension_index:])

            @classmethod
            def get_absolute_path(cls, settings, path):
                return 'CustomStaticFileTest:' + path

            def validate_absolute_path(self, root, absolute_path):
                return absolute_path

            @classmethod
            # NOTE(review): first parameter of this classmethod is named
            # 'self' but actually receives the class — confirm intended.
            def get_content(self, path, start=None, end=None):
                assert start is None and end is None
                if path == 'CustomStaticFileTest:foo.txt':
                    return b'bar'
                raise Exception("unexpected path %r" % path)

            def get_modified_time(self):
                return None

            @classmethod
            def get_version(cls, settings, path):
                return "42"

        class StaticUrlHandler(RequestHandler):
            def get(self, path):
                self.write(self.static_url(path))

        self.static_handler_class = MyStaticFileHandler
        return [("/static_url/(.*)", StaticUrlHandler)]

    def get_app_kwargs(self):
        return dict(static_path="dummy",
                    static_handler_class=self.static_handler_class)

    def test_serve(self):
        response = self.fetch("/static/foo.42.txt")
        self.assertEqual(response.body, b"bar")

    def test_static_url(self):
        with ExpectLog(gen_log, "Could not open static file", required=False):
            response = self.fetch("/static_url/foo.txt")
            self.assertEqual(response.body, b"/static/foo.42.txt")
@wsgi_safe
class HostMatchingTest(WebTestCase):
    """Tests Application.add_handlers host-pattern dispatch: handlers
    registered for a specific host only match requests carrying that
    Host header, and repeated add_handlers calls for the same host
    accumulate into one group."""
    class Handler(RequestHandler):
        def initialize(self, reply):
            self.reply = reply

        def get(self):
            self.write(self.reply)

    def get_handlers(self):
        # Wildcard-host handler, matched when no host-specific group does.
        return [("/foo", HostMatchingTest.Handler, {"reply": "wildcard"})]

    def test_host_matching(self):
        # Plain-string and regex host patterns for the same host should
        # all land in the same handler group.
        self.app.add_handlers("www.example.com",
                              [("/foo", HostMatchingTest.Handler, {"reply": "[0]"})])
        self.app.add_handlers(r"www\.example\.com",
                              [("/bar", HostMatchingTest.Handler, {"reply": "[1]"})])
        self.app.add_handlers("www.example.com",
                              [("/baz", HostMatchingTest.Handler, {"reply": "[2]"})])

        # Without a matching Host header, only the wildcard group applies.
        response = self.fetch("/foo")
        self.assertEqual(response.body, b"wildcard")
        response = self.fetch("/bar")
        self.assertEqual(response.code, 404)
        response = self.fetch("/baz")
        self.assertEqual(response.code, 404)

        response = self.fetch("/foo", headers={'Host': 'www.example.com'})
        self.assertEqual(response.body, b"[0]")
        response = self.fetch("/bar", headers={'Host': 'www.example.com'})
        self.assertEqual(response.body, b"[1]")
        response = self.fetch("/baz", headers={'Host': 'www.example.com'})
        self.assertEqual(response.body, b"[2]")
@wsgi_safe
class NamedURLSpecGroupsTest(WebTestCase):
    """Named regex capture groups in URL patterns are passed to the
    handler as keyword arguments, for both byte-string and unicode
    patterns."""
    def get_handlers(self):
        class EchoHandler(RequestHandler):
            def get(self, path):
                self.write(path)

        return [("/str/(?P<path>.*)", EchoHandler),
                (u("/unicode/(?P<path>.*)"), EchoHandler)]

    def test_named_urlspec_groups(self):
        response = self.fetch("/str/foo")
        self.assertEqual(response.body, b"foo")

        response = self.fetch("/unicode/bar")
        self.assertEqual(response.body, b"bar")
@wsgi_safe
class ClearHeaderTest(SimpleHandlerTestCase):
    """Tests RequestHandler.clear_header: a previously-set header is
    removed from the response, other headers are untouched, and
    clearing a header that was never set does not raise."""
    class Handler(RequestHandler):
        def get(self):
            # Set two headers, then clear one of them plus one that
            # does not exist.
            for header, value in (("h1", "foo"), ("h2", "bar")):
                self.set_header(header, value)
            for header in ("h1", "nonexistent"):
                self.clear_header(header)

    def test_clear_header(self):
        response = self.fetch("/")
        self.assertTrue("h1" not in response.headers)
        self.assertEqual(response.headers["h2"], "bar")
@wsgi_safe
class Header304Test(SimpleHandlerTestCase):
    """Entity headers present on the full response must be omitted from
    a subsequent 304 Not Modified response."""
    class Handler(RequestHandler):
        def get(self):
            self.set_header("Content-Language", "en_US")
            self.write("hello")

    def test_304_headers(self):
        response1 = self.fetch('/')
        self.assertEqual(response1.headers["Content-Length"], "5")
        self.assertEqual(response1.headers["Content-Language"], "en_US")

        # Re-request with the Etag to provoke the 304.
        response2 = self.fetch('/', headers={
            'If-None-Match': response1.headers["Etag"]})
        self.assertEqual(response2.code, 304)
        self.assertTrue("Content-Length" not in response2.headers)
        self.assertTrue("Content-Language" not in response2.headers)
        # Not an entity header, but should not be added to 304s by chunking
        self.assertTrue("Transfer-Encoding" not in response2.headers)
@wsgi_safe
class StatusReasonTest(SimpleHandlerTestCase):
    """set_status() with an explicit reason phrase; status codes with no
    built-in reason must raise when none is supplied."""
    class Handler(RequestHandler):
        def get(self):
            # ?reason=... is optional; absent means "use the default".
            reason = self.request.arguments.get('reason', [])
            self.set_status(int(self.get_argument('code')),
                            reason=reason[0] if reason else None)

    def get_http_client(self):
        # simple_httpclient only: curl doesn't expose the reason string
        return SimpleAsyncHTTPClient(io_loop=self.io_loop)

    def test_status(self):
        # Known code, default reason.
        response = self.fetch("/?code=304")
        self.assertEqual(response.code, 304)
        self.assertEqual(response.reason, "Not Modified")
        # Known code, custom reason overrides the default.
        response = self.fetch("/?code=304&reason=Foo")
        self.assertEqual(response.code, 304)
        self.assertEqual(response.reason, "Foo")
        # Unknown code with an explicit reason is allowed.
        response = self.fetch("/?code=682&reason=Bar")
        self.assertEqual(response.code, 682)
        self.assertEqual(response.reason, "Bar")
        # Unknown code without a reason raises inside the handler.
        with ExpectLog(app_log, 'Uncaught exception'):
            response = self.fetch("/?code=682")
        self.assertEqual(response.code, 500)
@wsgi_safe
class DateHeaderTest(SimpleHandlerTestCase):
    """Responses should carry a Date header close to the current time."""
    class Handler(RequestHandler):
        def get(self):
            self.write("hello")

    def test_date_header(self):
        response = self.fetch('/')
        header_date = datetime.datetime(
            *email.utils.parsedate(response.headers['Date'])[:6])
        # Compare the absolute difference.  The original one-sided check
        # (header_date - utcnow() < 2s) passes trivially whenever the
        # header time is earlier than now — a negative timedelta is
        # always less than two seconds — so a stale Date header would
        # never be caught.
        self.assertTrue(abs(header_date - datetime.datetime.utcnow()) <
                        datetime.timedelta(seconds=2))
@wsgi_safe
class RaiseWithReasonTest(SimpleHandlerTestCase):
    """HTTPError raised with reason= propagates that reason phrase into
    the status line, the error body, and str() of the exception."""
    class Handler(RequestHandler):
        def get(self):
            raise HTTPError(682, reason="Foo")

    def get_http_client(self):
        # simple_httpclient only: curl doesn't expose the reason string
        return SimpleAsyncHTTPClient(io_loop=self.io_loop)

    def test_raise_with_reason(self):
        response = self.fetch("/")
        self.assertEqual(response.code, 682)
        self.assertEqual(response.reason, "Foo")
        # The default error page includes the code and reason.
        self.assertIn(b'682: Foo', response.body)

    def test_httperror_str(self):
        self.assertEqual(str(HTTPError(682, reason="Foo")), "HTTP 682: Foo")
@wsgi_safe
class ErrorHandlerXSRFTest(WebTestCase):
    """Error responses (explicit ErrorHandler and the implicit 404) must
    not be replaced by 403s from XSRF checking when xsrf_cookies is on."""
    def get_handlers(self):
        # note that if the handlers list is empty we get the default_host
        # redirect fallback instead of a 404, so test with both an
        # explicitly defined error handler and an implicit 404.
        return [('/error', ErrorHandler, dict(status_code=417))]

    def get_app_kwargs(self):
        return dict(xsrf_cookies=True)

    def test_error_xsrf(self):
        # POST without an XSRF token still returns the configured 417.
        response = self.fetch('/error', method='POST', body='')
        self.assertEqual(response.code, 417)

    def test_404_xsrf(self):
        response = self.fetch('/404', method='POST', body='')
        self.assertEqual(response.code, 404)
class GzipTestCase(SimpleHandlerTestCase):
    """With gzip=True, responses are compressed when the client accepts
    it, and Vary: Accept-Encoding is always added (appended to any
    existing Vary header)."""
    class Handler(RequestHandler):
        def get(self):
            # Optional ?vary=... lets a test pre-populate the Vary header.
            if self.get_argument('vary', None):
                self.set_header('Vary', self.get_argument('vary'))
            self.write('hello world')

    def get_app_kwargs(self):
        return dict(gzip=True)

    def test_gzip(self):
        response = self.fetch('/')
        self.assertEqual(response.headers['Content-Encoding'], 'gzip')
        self.assertEqual(response.headers['Vary'], 'Accept-Encoding')

    def test_gzip_not_requested(self):
        # No Accept-Encoding: body stays uncompressed but Vary is still set.
        response = self.fetch('/', use_gzip=False)
        self.assertNotIn('Content-Encoding', response.headers)
        self.assertEqual(response.headers['Vary'], 'Accept-Encoding')

    def test_vary_already_present(self):
        response = self.fetch('/?vary=Accept-Language')
        self.assertEqual(response.headers['Vary'],
                         'Accept-Language, Accept-Encoding')
@wsgi_safe
class PathArgsInPrepareTest(WebTestCase):
    """path_args and path_kwargs are already populated by the time
    prepare() runs, for both positional and named capture groups."""
    class Handler(RequestHandler):
        def prepare(self):
            # Echo the captured path arguments from prepare(), before get().
            self.write(dict(args=self.path_args, kwargs=self.path_kwargs))

        def get(self, path):
            assert path == 'foo'
            self.finish()

    def get_handlers(self):
        return [('/pos/(.*)', self.Handler),
                ('/kw/(?P<path>.*)', self.Handler)]

    def test_pos(self):
        response = self.fetch('/pos/foo')
        response.rethrow()
        data = json_decode(response.body)
        self.assertEqual(data, {'args': ['foo'], 'kwargs': {}})

    def test_kw(self):
        response = self.fetch('/kw/foo')
        response.rethrow()
        data = json_decode(response.body)
        self.assertEqual(data, {'args': [], 'kwargs': {'path': 'foo'}})
@wsgi_safe
class ClearAllCookiesTest(SimpleHandlerTestCase):
    """clear_all_cookies() emits a Set-Cookie for every cookie on the
    request, resetting each to an empty value."""
    class Handler(RequestHandler):
        def get(self):
            self.clear_all_cookies()
            self.write('ok')

    def test_clear_all_cookies(self):
        response = self.fetch('/', headers={'Cookie': 'foo=bar; baz=xyzzy'})
        # Sort for a deterministic order before asserting.
        set_cookies = sorted(response.headers.get_list('Set-Cookie'))
        self.assertTrue(set_cookies[0].startswith('baz=;'))
        self.assertTrue(set_cookies[1].startswith('foo=;'))
class PermissionError(Exception):
    """Application-specific error used by ExceptionHandlerTest to
    exercise custom write_error/log_exception handling.

    Note: intentionally shadows the Python 3 builtin of the same name
    within this module.
    """
@wsgi_safe
class ExceptionHandlerTest(SimpleHandlerTestCase):
    """Covers the three exception paths out of a handler: HTTPError
    (logged as a warning), unknown exceptions (logged with traceback,
    500), and application exceptions intercepted by overridden
    write_error/log_exception hooks."""
    class Handler(RequestHandler):
        def get(self):
            # ?exc= selects which failure mode to trigger.
            exc = self.get_argument('exc')
            if exc == 'http':
                raise HTTPError(410, "no longer here")
            elif exc == 'zero':
                1 / 0
            elif exc == 'permission':
                raise PermissionError('not allowed')

        def write_error(self, status_code, **kwargs):
            # Custom response for PermissionError only; everything else
            # falls through to the default error page.
            if 'exc_info' in kwargs:
                typ, value, tb = kwargs['exc_info']
                if isinstance(value, PermissionError):
                    self.set_status(403)
                    self.write('PermissionError')
                    return
            RequestHandler.write_error(self, status_code, **kwargs)

        def log_exception(self, typ, value, tb):
            # Custom log line for PermissionError; default logging otherwise.
            if isinstance(value, PermissionError):
                app_log.warning('custom logging for PermissionError: %s',
                                value.args[0])
            else:
                RequestHandler.log_exception(self, typ, value, tb)

    def test_http_error(self):
        # HTTPErrors are logged as warnings with no stack trace.
        # TODO: extend ExpectLog to test this more precisely
        with ExpectLog(gen_log, '.*no longer here'):
            response = self.fetch('/?exc=http')
            self.assertEqual(response.code, 410)

    def test_unknown_error(self):
        # Unknown errors are logged as errors with a stack trace.
        with ExpectLog(app_log, 'Uncaught exception'):
            response = self.fetch('/?exc=zero')
            self.assertEqual(response.code, 500)

    def test_known_error(self):
        # log_exception can override logging behavior, and write_error
        # can override the response.
        with ExpectLog(app_log,
                       'custom logging for PermissionError: not allowed'):
            response = self.fetch('/?exc=permission')
            self.assertEqual(response.code, 403)
@wsgi_safe
class UIMethodUIModuleTest(SimpleHandlerTestCase):
    """Test that UI methods and modules are created correctly and
    associated with the handler.
    """
    class Handler(RequestHandler):
        def get(self):
            self.render('foo.html')

        def value(self):
            # Handler-level state the UI method/module reach back into.
            return self.get_argument("value")

    def get_app_kwargs(self):
        def my_ui_method(handler, x):
            return "In my_ui_method(%s) with handler value %s." % (
                x, handler.value())

        class MyModule(UIModule):
            def render(self, x):
                return "In MyModule(%s) with handler value %s." % (
                    x, self.handler.value())

        loader = DictLoader({
            'foo.html': '{{ my_ui_method(42) }} {% module MyModule(123) %}',
        })
        return dict(template_loader=loader,
                    ui_methods={'my_ui_method': my_ui_method},
                    ui_modules={'MyModule': MyModule})

    def tearDown(self):
        super(UIMethodUIModuleTest, self).tearDown()
        # TODO: fix template loader caching so this isn't necessary.
        RequestHandler._template_loaders.clear()

    def test_ui_method(self):
        response = self.fetch('/?value=asdf')
        # Both the UI method and the UI module saw the handler's argument.
        self.assertEqual(response.body,
                         b'In my_ui_method(42) with handler value asdf. '
                         b'In MyModule(123) with handler value asdf.')
@wsgi_safe
class GetArgumentErrorTest(SimpleHandlerTestCase):
    """get_argument() for a missing required argument raises
    MissingArgumentError carrying the argument name and log message."""
    class Handler(RequestHandler):
        def get(self):
            try:
                self.get_argument('foo')
                self.write({})
            except MissingArgumentError as e:
                self.write({'arg_name': e.arg_name,
                            'log_message': e.log_message})

    def test_catch_error(self):
        response = self.fetch('/')
        self.assertEqual(json_decode(response.body),
                         {'arg_name': 'foo',
                          'log_message': 'Missing argument foo'})
class MultipleExceptionTest(SimpleHandlerTestCase):
    """Multiple exceptions raised into one request context produce a
    single error response; extra ones only reach log_exception."""
    class Handler(RequestHandler):
        # Class-level counter shared across requests, incremented once
        # per exception delivered to log_exception.
        exc_count = 0

        @asynchronous
        def get(self):
            from tornado.ioloop import IOLoop
            # Schedule two callbacks that each raise after get() returns.
            IOLoop.current().add_callback(lambda: 1 / 0)
            IOLoop.current().add_callback(lambda: 1 / 0)

        def log_exception(self, typ, value, tb):
            MultipleExceptionTest.Handler.exc_count += 1

    def test_multi_exception(self):
        # This test verifies that multiple exceptions raised into the same
        # ExceptionStackContext do not generate extraneous log entries
        # due to "Cannot send error response after headers written".
        # log_exception is called, but it does not proceed to send_error.
        response = self.fetch('/')
        self.assertEqual(response.code, 500)
        response = self.fetch('/')
        self.assertEqual(response.code, 500)
        # Each of our two requests generated two exceptions, we should have
        # seen at least three of them by now (the fourth may still be
        # in the queue).
        self.assertGreater(MultipleExceptionTest.Handler.exc_count, 2)
@wsgi_safe
class SetCurrentUserTest(SimpleHandlerTestCase):
    """current_user is assignable: setting it directly (here from
    prepare()) bypasses the lazy get_current_user() lookup."""
    class Handler(RequestHandler):
        def prepare(self):
            self.current_user = 'Ben'

        def get(self):
            greeting = 'Hello %s' % self.current_user
            self.write(greeting)

    def test_set_current_user(self):
        # Ensure that current_user can be assigned to normally for apps
        # that want to forgo the lazy get_current_user property
        response = self.fetch('/')
        self.assertEqual(response.body, b'Hello Ben')
@wsgi_safe
class GetCurrentUserTest(WebTestCase):
    """Checks when get_current_user() is evaluated: rendering a template
    or UI module that does not reference current_user should not force
    the lookup (laziness), while referencing it should."""
    def get_app_kwargs(self):
        class WithoutUserModule(UIModule):
            def render(self):
                return ''

        class WithUserModule(UIModule):
            def render(self):
                # Touching current_user here forces the lookup.
                return str(self.current_user)

        loader = DictLoader({
            'without_user.html': '',
            'with_user.html': '{{ current_user }}',
            'without_user_module.html': '{% module WithoutUserModule() %}',
            'with_user_module.html': '{% module WithUserModule() %}',
        })
        return dict(template_loader=loader,
                    ui_modules={'WithUserModule': WithUserModule,
                                'WithoutUserModule': WithoutUserModule})

    def tearDown(self):
        super(GetCurrentUserTest, self).tearDown()
        # Template loaders are cached on the class; clear between tests.
        RequestHandler._template_loaders.clear()

    def get_handlers(self):
        class CurrentUserHandler(RequestHandler):
            def prepare(self):
                # Flag flipped by get_current_user so each test can see
                # whether the lookup actually happened.
                self.has_loaded_current_user = False

            def get_current_user(self):
                self.has_loaded_current_user = True
                return ''

        class WithoutUserHandler(CurrentUserHandler):
            def get(self):
                self.render_string('without_user.html')
                self.finish(str(self.has_loaded_current_user))

        class WithUserHandler(CurrentUserHandler):
            def get(self):
                self.render_string('with_user.html')
                self.finish(str(self.has_loaded_current_user))

        class CurrentUserModuleHandler(CurrentUserHandler):
            def get_template_namespace(self):
                # If RequestHandler.get_template_namespace is called, then
                # get_current_user is evaluated. Until #820 is fixed, this
                # is a small hack to circumvent the issue.
                return self.ui

        class WithoutUserModuleHandler(CurrentUserModuleHandler):
            def get(self):
                self.render_string('without_user_module.html')
                self.finish(str(self.has_loaded_current_user))

        class WithUserModuleHandler(CurrentUserModuleHandler):
            def get(self):
                self.render_string('with_user_module.html')
                self.finish(str(self.has_loaded_current_user))

        return [('/without_user', WithoutUserHandler),
                ('/with_user', WithUserHandler),
                ('/without_user_module', WithoutUserModuleHandler),
                ('/with_user_module', WithUserModuleHandler)]

    @unittest.skip('needs fix')
    def test_get_current_user_is_lazy(self):
        # TODO: Make this test pass. See #820.
        response = self.fetch('/without_user')
        self.assertEqual(response.body, b'False')

    def test_get_current_user_works(self):
        response = self.fetch('/with_user')
        self.assertEqual(response.body, b'True')

    def test_get_current_user_from_ui_module_is_lazy(self):
        response = self.fetch('/without_user_module')
        self.assertEqual(response.body, b'False')

    def test_get_current_user_from_ui_module_works(self):
        response = self.fetch('/with_user_module')
        self.assertEqual(response.body, b'True')
@wsgi_safe
class UnimplementedHTTPMethodsTest(SimpleHandlerTestCase):
    """A handler that implements no HTTP methods answers every standard
    method with 405 Method Not Allowed."""
    class Handler(RequestHandler):
        pass

    def test_unimplemented_standard_methods(self):
        for method in ['HEAD', 'GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/', method=method)
            self.assertEqual(response.code, 405)
        # Body-carrying methods need an explicit (empty) body to send.
        for method in ['POST', 'PUT']:
            response = self.fetch('/', method=method, body=b'')
            self.assertEqual(response.code, 405)
class UnimplementedNonStandardMethodsTest(SimpleHandlerTestCase):
    # wsgiref.validate complains about unknown methods in a way that makes
    # this test not wsgi_safe.
    class Handler(RequestHandler):
        def other(self):
            # Even though this method exists, it won't get called automatically
            # because it is not in SUPPORTED_METHODS.
            self.write('other')

    def test_unimplemented_patch(self):
        # PATCH is recently standardized; Tornado supports it by default
        # but wsgiref.validate doesn't like it.
        response = self.fetch('/', method='PATCH', body=b'')
        self.assertEqual(response.code, 405)

    def test_unimplemented_other(self):
        # A completely non-standard verb is also rejected with 405.
        response = self.fetch('/', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.code, 405)
@wsgi_safe
class AllHTTPMethodsTest(SimpleHandlerTestCase):
    """Every standard method routed to one implementation echoes the
    method name; HEAD echoes nothing (no body allowed)."""
    class Handler(RequestHandler):
        def method(self):
            self.write(self.request.method)

        # Route all standard verbs through the same implementation.
        get = delete = options = post = put = method

    def test_standard_methods(self):
        # HEAD shares GET's logic but must produce an empty body.
        response = self.fetch('/', method='HEAD')
        self.assertEqual(response.body, b'')
        for method in ['GET', 'DELETE', 'OPTIONS']:
            response = self.fetch('/', method=method)
            self.assertEqual(response.body, utf8(method))
        for method in ['POST', 'PUT']:
            response = self.fetch('/', method=method, body=b'')
            self.assertEqual(response.body, utf8(method))
class PatchMethodTest(SimpleHandlerTestCase):
    """PATCH works out of the box, and SUPPORTED_METHODS can be extended
    to route custom verbs (not wsgi_safe; see the class above)."""
    class Handler(RequestHandler):
        # Adding 'OTHER' makes the other() method below routable.
        SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)

        def patch(self):
            self.write('patch')

        def other(self):
            self.write('other')

    def test_patch(self):
        response = self.fetch('/', method='PATCH', body=b'')
        self.assertEqual(response.body, b'patch')

    def test_other(self):
        response = self.fetch('/', method='OTHER',
                              allow_nonstandard_methods=True)
        self.assertEqual(response.body, b'other')
@wsgi_safe
class FinishInPrepareTest(SimpleHandlerTestCase):
    """Calling finish() in prepare() short-circuits the request: the
    HTTP-method handler must never run."""
    class Handler(RequestHandler):
        def prepare(self):
            self.finish('done')

        def get(self):
            # It's difficult to assert for certain that a method did not
            # or will not be called in an asynchronous context, but this
            # will be logged noisily if it is reached.
            raise Exception('should not reach this method')

    def test_finish_in_prepare(self):
        response = self.fetch('/')
        self.assertEqual(response.body, b'done')
@wsgi_safe
class Default404Test(WebTestCase):
    """An unmatched path falls through to the built-in default 404
    handler and its stock HTML error page."""
    def get_handlers(self):
        # If there are no handlers at all a default redirect handler gets added.
        return [('/foo', RequestHandler)]

    def test_404(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 404)
        expected_page = (b'<html><title>404: Not Found</title>'
                         b'<body>404: Not Found</body></html>')
        self.assertEqual(response.body, expected_page)
@wsgi_safe
class Custom404Test(WebTestCase):
    """default_handler_class replaces the built-in 404 page with a
    custom handler for unmatched paths."""
    def get_handlers(self):
        return [('/foo', RequestHandler)]

    def get_app_kwargs(self):
        class Custom404Handler(RequestHandler):
            def get(self):
                self.set_status(404)
                self.write('custom 404 response')

        return dict(default_handler_class=Custom404Handler)

    def test_404(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 404)
        self.assertEqual(response.body, b'custom 404 response')
@wsgi_safe
class DefaultHandlerArgumentsTest(WebTestCase):
    """default_handler_args are forwarded to default_handler_class, here
    configuring ErrorHandler to answer unmatched paths with 403."""
    def get_handlers(self):
        return [('/foo', RequestHandler)]

    def get_app_kwargs(self):
        handler_args = dict(status_code=403)
        return dict(default_handler_class=ErrorHandler,
                    default_handler_args=handler_args)

    def test_403(self):
        # Any unmatched path should yield the configured status code.
        response = self.fetch('/')
        self.assertEqual(response.code, 403)
| |
from hashlib import sha1
from datetime import datetime
import logging
import mimetypes
import re
import urllib
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.markup.templatetags import markup
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.conf import settings
from django.template.defaultfilters import slugify, striptags
from django.utils.translation import ugettext_lazy as _
from django.utils.text import truncate_html_words
from decorators import logtime, once_per_instance
# Tunables, all overridable from the Django settings module.
WORD_LIMIT = getattr(settings, 'ARTICLES_TEASER_LIMIT', 75)
AUTO_TAG = getattr(settings, 'ARTICLES_AUTO_TAG', True)
DEFAULT_DB = getattr(settings, 'ARTICLES_DEFAULT_DB', 'default')
LOOKUP_LINK_TITLE = getattr(settings, 'ARTICLES_LOOKUP_LINK_TITLE', True)

# Single-character markup codes stored in Article.markup.
MARKUP_HTML = 'h'
MARKUP_MARKDOWN = 'm'
MARKUP_REST = 'r'
MARKUP_TEXTILE = 't'
MARKUP_OPTIONS = getattr(settings, 'ARTICLE_MARKUP_OPTIONS', (
    (MARKUP_HTML, _('HTML/Plain Text')),
    (MARKUP_MARKDOWN, _('Markdown')),
    (MARKUP_REST, _('ReStructured Text')),
    (MARKUP_TEXTILE, _('Textile'))
))
MARKUP_DEFAULT = getattr(settings, 'ARTICLE_MARKUP_DEFAULT', MARKUP_HTML)

USE_ADDTHIS_BUTTON = getattr(settings, 'USE_ADDTHIS_BUTTON', True)
ADDTHIS_USE_AUTHOR = getattr(settings, 'ADDTHIS_USE_AUTHOR', True)
DEFAULT_ADDTHIS_USER = getattr(settings, 'DEFAULT_ADDTHIS_USER', None)

# regex used to find links in an article
# Raw strings so the backslash escapes inside the patterns are taken
# literally by the regex engine (and don't trigger invalid-escape
# warnings in the string literal itself).
LINK_RE = re.compile(r'<a.*?href="(.*?)".*?>(.*?)</a>', re.I | re.M)
TITLE_RE = re.compile(r'<title.*?>(.*?)</title>', re.I | re.M)
# Matches any character NOT allowed in a tag slug; used with sub('') in
# Tag.clean_tag.  The original pattern had a trailing '?', which only
# added zero-width matches — no-ops when substituting with '' — so it is
# dropped here without changing the substitution result.
TAG_RE = re.compile(r'[^a-z0-9\-_\+\:\.]', re.I)

log = logging.getLogger('articles.models')
def get_name(user):
    """
    Provides a way to fall back to a user's username if their full name has not
    been entered.

    The resolved name is cached per-user for 24 hours.
    """
    key = 'username_for_%s' % user.id
    log.debug('Looking for "%s" in cache (%s)' % (key, user))

    name = cache.get(key)
    if not name:
        log.debug('Name not found')

        # Prefer the full name when one has been entered; otherwise fall
        # back to the plain username.
        if user.get_full_name().strip():
            log.debug('Using full name')
            name = user.get_full_name()
        else:
            log.debug('Using username')
            name = user.username

        log.debug('Caching %s as "%s" for a while' % (key, name))
        cache.set(key, name, 86400)

    return name

# Expose the helper as a method on Django's User model.
User.get_name = get_name
class Tag(models.Model):
    """A keyword label attached to articles; the slug is derived from
    the name on save and used in URLs."""
    name = models.CharField(max_length=64, unique=True)
    # Auto-populated from ``name`` in save(); nullable for legacy rows.
    slug = models.CharField(max_length=64, unique=True, null=True, blank=True)

    def __unicode__(self):
        return self.name

    @staticmethod
    def clean_tag(name):
        """Replace spaces with dashes, in case someone adds such a tag manually"""
        # Drop non-ASCII, then strip characters TAG_RE disallows.
        name = name.replace(' ', '-').encode('ascii', 'ignore')
        name = TAG_RE.sub('', name)
        clean = name.lower().strip(", ")

        log.debug('Cleaned tag "%s" to "%s"' % (name, clean))
        return clean

    def save(self, *args, **kwargs):
        """Cleans up any characters I don't want in a URL"""
        log.debug('Ensuring that tag "%s" has a slug' % (self,))
        self.slug = Tag.clean_tag(self.name)
        super(Tag, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        return ('articles_display_tag', (self.cleaned,))

    @property
    def cleaned(self):
        """Returns the clean version of the tag"""
        # Fall back to cleaning on the fly for rows saved without a slug.
        return self.slug or Tag.clean_tag(self.name)

    @property
    def rss_name(self):
        return self.cleaned

    class Meta:
        ordering = ('name',)
class ArticleStatusManager(models.Manager):
    def default(self):
        """Return the first status under the default ordering, or None
        when no statuses exist yet."""
        first = self.all()[:1]
        return first[0] if len(first) else None
class ArticleStatus(models.Model):
    """Editorial status for an article; ``is_live`` gates whether
    regular users can see articles in this status."""
    name = models.CharField(max_length=50)
    # Integer sort key; combined with name via Meta.ordering below.
    ordering = models.IntegerField(default=0)
    is_live = models.BooleanField(default=False, blank=True)

    objects = ArticleStatusManager()

    class Meta:
        ordering = ('ordering', 'name')
        verbose_name_plural = _('Article statuses')

    def __unicode__(self):
        # Flag live statuses so they stand out in admin dropdowns.
        if self.is_live:
            return u'%s (live)' % self.name
        else:
            return self.name
class ArticleManager(models.Manager):
    def active(self):
        """
        Retrieves all active articles which have been published and have not
        yet expired.
        """
        now = datetime.now()
        not_expired = Q(expiration_date__isnull=True) | Q(expiration_date__gte=now)
        return self.get_query_set().filter(not_expired,
                                           publish_date__lte=now,
                                           is_active=True)

    def live(self, user=None):
        """Retrieves all live articles"""
        qs = self.active()

        # superusers get to see all articles; everyone else only sees
        # articles whose status is flagged live.
        if user is not None and user.is_superuser:
            return qs
        return qs.filter(status__is_live=True)
# Help text for the Article.markup field, linking to a syntax guide for
# each supported markup language.  (HTML content is rendered verbatim
# in the admin form, so it must stay exactly as-is.)
MARKUP_HELP = _("""Select the type of markup you are using in this article.
<ul>
<li><a href="http://daringfireball.net/projects/markdown/basics" target="_blank">Markdown Guide</a></li>
<li><a href="http://docutils.sourceforge.net/docs/user/rst/quickref.html" target="_blank">ReStructured Text Guide</a></li>
<li><a href="http://thresholdstate.com/articles/4312/the-textile-reference-manual" target="_blank">Textile Guide</a></li>
</ul>""")
class Article(models.Model):
    title = models.CharField(max_length=100)
    # Slug only needs to be unique among articles published the same year.
    slug = models.SlugField(unique_for_year='publish_date')
    status = models.ForeignKey(ArticleStatus, default=ArticleStatus.objects.default)
    author = models.ForeignKey(User)
    sites = models.ManyToManyField(Site, blank=True)

    keywords = models.TextField(blank=True, help_text=_("If omitted, the keywords will be the same as the article tags."))
    description = models.TextField(blank=True, help_text=_("If omitted, the description will be determined by the first bit of the article's content."))

    markup = models.CharField(max_length=1, choices=MARKUP_OPTIONS, default=MARKUP_DEFAULT, help_text=MARKUP_HELP)
    content = models.TextField()
    # Cached HTML rendering of `content`; rebuilt by do_render_markup().
    rendered_content = models.TextField()

    tags = models.ManyToManyField(Tag, help_text=_('Tags that describe this article'), blank=True)
    auto_tag = models.BooleanField(default=AUTO_TAG, blank=True, help_text=_('Check this if you want to automatically assign any existing tags to this article based on its content.'))
    followup_for = models.ManyToManyField('self', symmetrical=False, blank=True, help_text=_('Select any other articles that this article follows up on.'), related_name='followups')
    related_articles = models.ManyToManyField('self', blank=True)

    publish_date = models.DateTimeField(default=datetime.now, help_text=_('The date and time this article shall appear online.'))
    expiration_date = models.DateTimeField(blank=True, null=True, help_text=_('Leave blank if the article does not expire.'))

    is_active = models.BooleanField(default=True, blank=True)
    login_required = models.BooleanField(blank=True, help_text=_('Enable this if users must login before they can read this article.'))

    use_addthis_button = models.BooleanField(_('Show AddThis button'), blank=True, default=USE_ADDTHIS_BUTTON, help_text=_('Check this to show an AddThis bookmark button when viewing an article.'))
    addthis_use_author = models.BooleanField(_("Use article author's username"), blank=True, default=ADDTHIS_USE_AUTHOR, help_text=_("Check this if you want to use the article author's username for the AddThis button. Respected only if the username field is left empty."))
    addthis_username = models.CharField(_('AddThis Username'), max_length=50, blank=True, default=DEFAULT_ADDTHIS_USER, help_text=_('The AddThis username to use for the button.'))

    objects = ArticleManager()

    def __init__(self, *args, **kwargs):
        """Makes sure that we have some rendered content to use"""
        super(Article, self).__init__(*args, **kwargs)

        # Per-instance caches used by get_next_article / get_previous_article
        # and the teaser property.
        self._next = None
        self._previous = None
        self._teaser = None

        if self.id:
            # mark the article as inactive if it's expired and still active
            # NOTE(review): saving from __init__ means simply *loading* an
            # expired article performs a database write -- confirm intended.
            if self.expiration_date and self.expiration_date <= datetime.now() and self.is_active:
                self.is_active = False
                self.save()

            # Backfill the rendered HTML for rows whose rendered copy is
            # missing or blank.
            if not self.rendered_content or not len(self.rendered_content.strip()):
                self.save()

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        """Renders the article using the appropriate markup language."""
        using = kwargs.get('using', DEFAULT_DB)

        self.do_render_markup()
        self.do_addthis_button()
        self.do_meta_description()
        self.do_unique_slug(using)

        super(Article, self).save(*args, **kwargs)

        # do some things that require an ID first
        # `|=` (not `or`) is deliberate: each step must run even when an
        # earlier one has already requested a save.
        requires_save = self.do_auto_tag(using)
        requires_save |= self.do_tags_to_keywords()
        requires_save |= self.do_default_site(using)

        if requires_save:
            # bypass the other processing
            super(Article, self).save()

    def do_render_markup(self):
        """Turns any markup into HTML"""
        original = self.rendered_content
        if self.markup == MARKUP_MARKDOWN:
            self.rendered_content = markup.markdown(self.content)
        elif self.markup == MARKUP_REST:
            self.rendered_content = markup.restructuredtext(self.content)
        elif self.markup == MARKUP_TEXTILE:
            self.rendered_content = markup.textile(self.content)
        else:
            # plain HTML: publish the content verbatim
            self.rendered_content = self.content

        # Tell the caller whether rendering actually changed anything.
        return (self.rendered_content != original)

    def do_addthis_button(self):
        """Sets the AddThis username for this post"""
        # if the author wishes to have an "AddThis" button on this article,
        # make sure we have a username to go along with it.
        if self.use_addthis_button and self.addthis_use_author and not self.addthis_username:
            self.addthis_username = self.author.username
            return True

        return False

    def do_unique_slug(self, using=DEFAULT_DB):
        """
        Ensures that the slug is always unique for the year this article was
        posted
        """
        if not self.id:
            # make sure we have a slug first
            if not len(self.slug.strip()):
                self.slug = slugify(self.title)

            self.slug = self.get_unique_slug(self.slug, using)
            return True

        return False

    def do_tags_to_keywords(self):
        """
        If meta keywords is empty, sets them using the article tags.

        Returns True if an additional save is required, False otherwise.
        """
        if len(self.keywords.strip()) == 0:
            self.keywords = ', '.join([t.name for t in self.tags.all()])
            return True

        return False

    def do_meta_description(self):
        """
        If meta description is empty, sets it to the article's teaser.

        Returns True if an additional save is required, False otherwise.
        """
        if len(self.description.strip()) == 0:
            self.description = self.teaser
            return True

        return False

    @logtime
    @once_per_instance
    def do_auto_tag(self, using=DEFAULT_DB):
        """
        Performs the auto-tagging work if necessary.

        Returns True if an additional save is required, False otherwise.
        """
        if not self.auto_tag:
            log.debug('Article "%s" (ID: %s) is not marked for auto-tagging. Skipping.' % (self.title, self.pk))
            return False

        # don't clobber any existing tags!
        existing_ids = [t.id for t in self.tags.all()]
        log.debug('Article %s already has these tags: %s' % (self.pk, existing_ids))

        unused = Tag.objects.all()
        if hasattr(unused, 'using'):
            unused = unused.using(using)
        unused = unused.exclude(id__in=existing_ids)

        found = False
        to_search = (self.content, self.title, self.description, self.keywords)
        for tag in unused:
            # NOTE(review): tag names are interpolated into the pattern
            # unescaped; a name containing regex metacharacters could
            # misbehave -- confirm names are sanitized upstream.
            regex = re.compile(r'\b%s\b' % tag.name, re.I)
            if any(regex.search(text) for text in to_search):
                log.debug('Applying Tag "%s" (%s) to Article %s' % (tag, tag.pk, self.pk))
                self.tags.add(tag)
                found = True

        return found

    def do_default_site(self, using=DEFAULT_DB):
        """
        If no site was selected, selects the site used to create the article
        as the default site.

        Returns True if an additional save is required, False otherwise.
        """
        if not len(self.sites.all()):
            sites = Site.objects.all()
            if hasattr(sites, 'using'):
                sites = sites.using(using)
            self.sites.add(sites.get(pk=settings.SITE_ID))
            return True

        return False

    def get_unique_slug(self, slug, using=DEFAULT_DB):
        """Iterates until a unique slug is found"""
        # we need a publish date before we can do anything meaningful
        if type(self.publish_date) is not datetime:
            return slug

        orig_slug = slug
        year = self.publish_date.year
        counter = 1

        while True:
            not_unique = Article.objects.all()
            if hasattr(not_unique, 'using'):
                not_unique = not_unique.using(using)
            not_unique = not_unique.filter(publish_date__year=year, slug=slug)

            if len(not_unique) == 0:
                return slug

            # Collides within this year: try "<orig>-1", "<orig>-2", ...
            slug = '%s-%s' % (orig_slug, counter)
            counter += 1

    def _get_article_links(self):
        """
        Find all links in this article. When a link is encountered in the
        article text, this will attempt to discover the title of the page it
        links to. If there is a problem with the target page, or there is no
        title (ie it's an image or other binary file), the text of the link is
        used as the title. Once a title is determined, it is cached for a week
        before it will be requested again.
        """
        links = []

        # find all links in the article
        log.debug('Locating links in article: %s' % (self,))
        for link in LINK_RE.finditer(self.rendered_content):
            url = link.group(1)
            log.debug('Do we have a title for "%s"?' % (url,))
            # Python 2 idiom: sha1() is fed the URL string directly
            # (would need .encode() on Python 3).
            key = 'href_title_' + sha1(url).hexdigest()

            # look in the cache for the link target's title
            title = cache.get(key)
            if title is None:
                log.debug('Nope... Getting it and caching it.')
                # Fall back to the anchor text until proven otherwise.
                title = link.group(2)

                if LOOKUP_LINK_TITLE:
                    try:
                        log.debug('Looking up title for URL: %s' % (url,))
                        # open the URL
                        c = urllib.urlopen(url)
                        html = c.read()
                        c.close()

                        # try to determine the title of the target
                        title_m = TITLE_RE.search(html)
                        if title_m:
                            title = title_m.group(1)
                            log.debug('Found title: %s' % (title,))
                    # NOTE(review): bare except also swallows KeyboardInterrupt /
                    # SystemExit; consider `except Exception`.
                    except:
                        # if anything goes wrong (ie IOError), use the link's text
                        log.warn('Failed to retrieve the title for "%s"; using link text "%s"' % (url, title))

                # cache the page title for a week
                log.debug('Using "%s" as title for "%s"' % (title, url))
                cache.set(key, title, 604800)

            # add it to the list of links and titles, first occurrence wins
            if url not in (l[0] for l in links):
                links.append((url, title))

        return tuple(links)
    links = property(_get_article_links)

    def _get_word_count(self):
        """Stupid word counter for an article."""
        return len(striptags(self.rendered_content).split(' '))
    word_count = property(_get_word_count)

    @models.permalink
    def get_absolute_url(self):
        # URL is year + slug (slug is unique per year, see do_unique_slug).
        return ('articles_display_article', (self.publish_date.year, self.slug))

    def _get_teaser(self):
        """
        Retrieve some part of the article or the article's description.
        """
        if not self._teaser:
            if len(self.description.strip()):
                self._teaser = self.description
            else:
                self._teaser = truncate_html_words(self.rendered_content, WORD_LIMIT)

        return self._teaser
    teaser = property(_get_teaser)

    def get_next_article(self):
        """Determines the next live article"""
        if not self._next:
            try:
                qs = Article.objects.live().exclude(id__exact=self.id)
                article = qs.filter(publish_date__gte=self.publish_date).order_by('publish_date')[0]
            except (Article.DoesNotExist, IndexError):
                article = None
            self._next = article

        return self._next

    def get_previous_article(self):
        """Determines the previous live article"""
        if not self._previous:
            try:
                qs = Article.objects.live().exclude(id__exact=self.id)
                article = qs.filter(publish_date__lte=self.publish_date).order_by('-publish_date')[0]
            except (Article.DoesNotExist, IndexError):
                article = None
            self._previous = article

        return self._previous

    class Meta:
        ordering = ('-publish_date', 'title')
        get_latest_by = 'publish_date'
class Attachment(models.Model):
    # Upload path: attach/<current year>/<article slug>/<original filename>.
    upload_to = lambda inst, fn: 'attach/%s/%s/%s' % (datetime.now().year, inst.article.slug, fn)

    article = models.ForeignKey(Article, related_name='attachments')
    attachment = models.FileField(upload_to=upload_to)
    caption = models.CharField(max_length=255, blank=True)

    class Meta:
        ordering = ('-article', 'id')

    def __unicode__(self):
        return u'%s: %s' % (self.article, self.caption)

    @property
    def filename(self):
        # Basename of the stored file (strip upload directories).
        return self.attachment.name.split('/')[-1]

    @property
    def content_type_class(self):
        # Guessed MIME type rewritten as a CSS-class-safe token,
        # e.g. 'image/png' -> 'image_png'.
        mt = mimetypes.guess_type(self.attachment.path)[0]
        if mt:
            content_type = mt.replace('/', '_')
        else:
            # assume everything else is text/plain
            content_type = 'text_plain'

        return content_type
| |
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import fixtures
import mock
from six.moves import builtins
from nova import exception
from nova.pci import utils
from nova import test
class PciDeviceMatchTestCase(test.NoDBTestCase):
    """Unit tests for utils.pci_device_prop_match()."""

    def setUp(self):
        super(PciDeviceMatchTestCase, self).setUp()
        self.fake_pci_1 = {'vendor_id': 'v1',
                           'device_id': 'd1'}

    def test_single_spec_match(self):
        specs = [{'vendor_id': 'v1', 'device_id': 'd1'}]
        self.assertTrue(utils.pci_device_prop_match(self.fake_pci_1, specs))

    def test_multiple_spec_match(self):
        # The device matches as soon as any one spec in the list matches.
        specs = [{'vendor_id': 'v1', 'device_id': 'd1'},
                 {'vendor_id': 'v3', 'device_id': 'd3'}]
        self.assertTrue(utils.pci_device_prop_match(self.fake_pci_1, specs))

    def test_spec_dismatch(self):
        # No spec in the list matches the device.
        specs = [{'vendor_id': 'v4', 'device_id': 'd4'},
                 {'vendor_id': 'v3', 'device_id': 'd3'}]
        self.assertFalse(utils.pci_device_prop_match(self.fake_pci_1, specs))

    def test_spec_extra_key(self):
        # A spec key the device does not expose can never match.
        specs = [{'vendor_id': 'v1', 'device_id': 'd1', 'wrong_key': 'k1'}]
        self.assertFalse(utils.pci_device_prop_match(self.fake_pci_1, specs))
class PciDeviceAddressParserTestCase(test.NoDBTestCase):
    """Unit tests for utils.parse_address()."""

    def test_parse_address(self):
        # A well-formed address splits into (domain, bus, slot, function).
        self.parse_result = utils.parse_address("0000:04:12.6")
        expected = ('0000', '04', '12', '6')
        self.assertEqual(self.parse_result, expected)

    def test_parse_address_wrong(self):
        # Separators swapped (dot before final colon) must be rejected.
        self.assertRaises(exception.PciDeviceWrongAddressFormat,
                          utils.parse_address, "0000:04.12:6")

    def test_parse_address_invalid_character(self):
        # 'h' is not a hexadecimal digit, so the address is invalid.
        self.assertRaises(exception.PciDeviceWrongAddressFormat,
                          utils.parse_address, "0000:h4.12:6")
class GetFunctionByIfnameTestCase(test.NoDBTestCase):
    """Unit tests for utils.get_function_by_ifname()."""

    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch.object(os, 'readlink')
    def test_virtual_function(self, mock_readlink, *args):
        mock_readlink.return_value = '../../../0000.00.00.1'
        # A VF has no sriov_totalvfs file, so the open() raises IOError.
        with mock.patch.object(
                builtins, 'open', side_effect=IOError()):
            address, physical_function = utils.get_function_by_ifname('eth0')
            self.assertEqual(address, '0000.00.00.1')
            self.assertFalse(physical_function)

    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch.object(os, 'readlink')
    def test_physical_function(self, mock_readlink, *args):
        ifname = 'eth0'
        totalvf_path = "/sys/class/net/%s/device/%s" % (ifname,
                                                        utils._SRIOV_TOTALVFS)
        mock_readlink.return_value = '../../../0000:00:00.1'
        # A PF exposes a readable sriov_totalvfs with a positive count.
        with mock.patch.object(
                builtins, 'open', mock.mock_open(read_data='4')) as mock_open:
            address, physical_function = utils.get_function_by_ifname('eth0')
            self.assertEqual(address, '0000:00:00.1')
            self.assertTrue(physical_function)
            mock_open.assert_called_once_with(totalvf_path)

    @mock.patch('os.path.isdir', return_value=False)
    def test_exception(self, *args):
        # Non-PCI interfaces (e.g. loopback) have no device directory.
        address, physical_function = utils.get_function_by_ifname('lo')
        self.assertIsNone(address)
        self.assertFalse(physical_function)
class IsPhysicalFunctionTestCase(test.NoDBTestCase):
    """Unit tests for utils.is_physical_function()."""

    def setUp(self):
        super(IsPhysicalFunctionTestCase, self).setUp()
        # (domain, bus, slot, function) tuple passed positionally below.
        self.pci_args = utils.get_pci_address_fields('0000:00:00.1')

    @mock.patch('os.path.isdir', return_value=True)
    def test_virtual_function(self, *args):
        # sriov_totalvfs unreadable -> the device is not a PF.
        with mock.patch.object(
                builtins, 'open', side_effect=IOError()):
            self.assertFalse(utils.is_physical_function(*self.pci_args))

    @mock.patch('os.path.isdir', return_value=True)
    def test_physical_function(self, *args):
        # sriov_totalvfs readable with a positive count -> PF.
        with mock.patch.object(
                builtins, 'open', mock.mock_open(read_data='4')):
            self.assertTrue(utils.is_physical_function(*self.pci_args))

    @mock.patch('os.path.isdir', return_value=False)
    def test_exception(self, *args):
        # Missing sysfs device directory -> not a PF.
        self.assertFalse(utils.is_physical_function(*self.pci_args))
class GetIfnameByPciAddressTestCase(test.NoDBTestCase):
    """Unit tests for utils.get_ifname_by_pci_address()."""

    def setUp(self):
        super(GetIfnameByPciAddressTestCase, self).setUp()
        self.pci_address = '0000:00:00.1'

    @mock.patch.object(os, 'listdir')
    def test_physical_function_inferface_name(self, mock_listdir):
        mock_listdir.return_value = ['foo', 'bar']
        ifname = utils.get_ifname_by_pci_address(
            self.pci_address, pf_interface=True)
        # NOTE(review): both the PF and VF tests expect 'bar' -- presumably
        # the implementation picks the last directory entry; confirm.
        self.assertEqual(ifname, 'bar')

    @mock.patch.object(os, 'listdir')
    def test_virtual_function_inferface_name(self, mock_listdir):
        mock_listdir.return_value = ['foo', 'bar']
        ifname = utils.get_ifname_by_pci_address(
            self.pci_address, pf_interface=False)
        self.assertEqual(ifname, 'bar')

    @mock.patch.object(os, 'listdir')
    def test_exception(self, mock_listdir):
        # A missing sysfs directory is translated to PciDeviceNotFoundById.
        mock_listdir.side_effect = OSError('No such file or directory')
        self.assertRaises(
            exception.PciDeviceNotFoundById,
            utils.get_ifname_by_pci_address,
            self.pci_address
        )
class GetMacByPciAddressTestCase(test.NoDBTestCase):
    """Unit tests for utils.get_mac_by_pci_address()."""

    def setUp(self):
        super(GetMacByPciAddressTestCase, self).setUp()
        self.pci_address = '0000:07:00.1'
        self.if_name = 'enp7s0f1'
        # A real temp file stands in for sysfs's "address" attribute;
        # os.path.join is mocked to redirect reads to it.
        self.tmpdir = self.useFixture(fixtures.TempDir())
        self.fake_file = os.path.join(self.tmpdir.path, "address")
        with open(self.fake_file, "w") as f:
            f.write("a0:36:9f:72:00:00\n")

    @mock.patch.object(os, 'listdir')
    @mock.patch.object(os.path, 'join')
    def test_get_mac(self, mock_join, mock_listdir):
        mock_listdir.return_value = [self.if_name]
        mock_join.return_value = self.fake_file
        mac = utils.get_mac_by_pci_address(self.pci_address)
        mock_join.assert_called_once_with(
            "/sys/bus/pci/devices/%s/net" % self.pci_address, self.if_name,
            "address")
        self.assertEqual("a0:36:9f:72:00:00", mac)

    @mock.patch.object(os, 'listdir')
    @mock.patch.object(os.path, 'join')
    def test_get_mac_fails(self, mock_join, mock_listdir):
        # Address file missing entirely -> PciDeviceNotFoundById.
        os.unlink(self.fake_file)
        mock_listdir.return_value = [self.if_name]
        mock_join.return_value = self.fake_file
        self.assertRaises(
            exception.PciDeviceNotFoundById,
            utils.get_mac_by_pci_address, self.pci_address)

    @mock.patch.object(os, 'listdir')
    @mock.patch.object(os.path, 'join')
    def test_get_mac_fails_empty(self, mock_join, mock_listdir):
        # Address file present but empty -> also PciDeviceNotFoundById.
        with open(self.fake_file, "w") as f:
            f.truncate(0)
        mock_listdir.return_value = [self.if_name]
        mock_join.return_value = self.fake_file
        self.assertRaises(
            exception.PciDeviceNotFoundById,
            utils.get_mac_by_pci_address, self.pci_address)

    @mock.patch.object(os, 'listdir')
    @mock.patch.object(os.path, 'join')
    def test_get_physical_function_mac(self, mock_join, mock_listdir):
        mock_listdir.return_value = [self.if_name]
        mock_join.return_value = self.fake_file
        # pf_interface=True reads via the physfn/ symlink path instead.
        mac = utils.get_mac_by_pci_address(self.pci_address, pf_interface=True)
        mock_join.assert_called_once_with(
            "/sys/bus/pci/devices/%s/physfn/net" % self.pci_address,
            self.if_name, "address")
        self.assertEqual("a0:36:9f:72:00:00", mac)
class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
    """Unit tests for utils.get_vf_num_by_pci_address()."""

    def setUp(self):
        super(GetVfNumByPciAddressTestCase, self).setUp()
        self.pci_address = '0000:00:00.1'
        # One candidate physfn/virtfn3 symlink for the glob to yield.
        self.paths = [
            '/sys/bus/pci/devices/0000:00:00.1/physfn/virtfn3',
        ]

    @mock.patch.object(os, 'readlink')
    @mock.patch.object(glob, 'iglob')
    def test_vf_number_found(self, mock_iglob, mock_readlink):
        mock_iglob.return_value = self.paths
        # The symlink resolves back to our address, so the VF number is
        # the '3' suffix of "virtfn3".
        mock_readlink.return_value = '../../0000:00:00.1'
        vf_num = utils.get_vf_num_by_pci_address(self.pci_address)
        self.assertEqual(vf_num, '3')

    @mock.patch.object(os, 'readlink')
    @mock.patch.object(glob, 'iglob')
    def test_vf_number_not_found(self, mock_iglob, mock_readlink):
        mock_iglob.return_value = self.paths
        # Symlink points at a different device -> no VF number for us.
        mock_readlink.return_value = '../../0000:00:00.2'
        self.assertRaises(
            exception.PciDeviceNotFoundById,
            utils.get_vf_num_by_pci_address,
            self.pci_address
        )

    @mock.patch.object(os, 'readlink')
    @mock.patch.object(glob, 'iglob')
    def test_exception(self, mock_iglob, mock_readlink):
        mock_iglob.return_value = self.paths
        # readlink failure is also reported as PciDeviceNotFoundById.
        mock_readlink.side_effect = OSError('No such file or directory')
        self.assertRaises(
            exception.PciDeviceNotFoundById,
            utils.get_vf_num_by_pci_address,
            self.pci_address
        )
| |
# coding=utf-8
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from testtools import matchers
from ironic.common import context
from ironic.common import exception
from ironic import objects
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
from ironic.tests.unit.objects import utils as obj_utils
class TestNodeObject(base.DbTestCase):
    """Unit tests for the objects.Node versioned object."""

    def setUp(self):
        super(TestNodeObject, self).setUp()
        self.ctxt = context.get_admin_context()
        # Raw DB-row-style dict plus a matching Node object fixture.
        self.fake_node = utils.get_test_node()
        self.node = obj_utils.get_test_node(self.ctxt, **self.fake_node)

    def test_get_by_id(self):
        node_id = self.fake_node['id']
        with mock.patch.object(self.dbapi, 'get_node_by_id',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node

            # Node.get dispatches to get_node_by_id for integer ids.
            node = objects.Node.get(self.context, node_id)

            mock_get_node.assert_called_once_with(node_id)
            self.assertEqual(self.context, node._context)

    def test_get_by_uuid(self):
        uuid = self.fake_node['uuid']
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node

            # Node.get dispatches to get_node_by_uuid for UUID strings.
            node = objects.Node.get(self.context, uuid)

            mock_get_node.assert_called_once_with(uuid)
            self.assertEqual(self.context, node._context)

    def test_get_bad_id_and_uuid(self):
        # Neither a valid integer id nor a valid UUID.
        self.assertRaises(exception.InvalidIdentity,
                          objects.Node.get, self.context, 'not-a-uuid')

    def test_get_by_port_addresses(self):
        with mock.patch.object(self.dbapi, 'get_node_by_port_addresses',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node

            node = objects.Node.get_by_port_addresses(self.context,
                                                      ['aa:bb:cc:dd:ee:ff'])

            mock_get_node.assert_called_once_with(['aa:bb:cc:dd:ee:ff'])
            self.assertEqual(self.context, node._context)

    def test_save(self):
        uuid = self.fake_node['uuid']
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node
            with mock.patch.object(self.dbapi, 'update_node',
                                   autospec=True) as mock_update_node:
                mock_update_node.return_value = utils.get_test_node()
                n = objects.Node.get(self.context, uuid)
                self.assertEqual({"private_state": "secret value"},
                                 n.driver_internal_info)
                n.properties = {"fake": "property"}
                n.driver = "fake-driver"
                n.save()

                mock_get_node.assert_called_once_with(uuid)
                # save() also clears driver_internal_info in the update.
                mock_update_node.assert_called_once_with(
                    uuid, {'properties': {"fake": "property"},
                           'driver': 'fake-driver',
                           'driver_internal_info': {}})
                self.assertEqual(self.context, n._context)
                self.assertEqual({}, n.driver_internal_info)

    def test_save_updated_at_field(self):
        uuid = self.fake_node['uuid']
        extra = {"test": 123}
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node
            with mock.patch.object(self.dbapi, 'update_node',
                                   autospec=True) as mock_update_node:
                mock_update_node.return_value = (
                    utils.get_test_node(extra=extra, updated_at=test_time))
                n = objects.Node.get(self.context, uuid)
                self.assertEqual({"private_state": "secret value"},
                                 n.driver_internal_info)
                n.properties = {"fake": "property"}
                n.extra = extra
                n.driver = "fake-driver"
                n.driver_internal_info = {}
                n.save()

                mock_get_node.assert_called_once_with(uuid)
                mock_update_node.assert_called_once_with(
                    uuid, {'properties': {"fake": "property"},
                           'driver': 'fake-driver',
                           'driver_internal_info': {},
                           'extra': {'test': 123}})
                self.assertEqual(self.context, n._context)
                # updated_at must be refreshed from the DB return value.
                res_updated_at = n.updated_at.replace(tzinfo=None)
                self.assertEqual(test_time, res_updated_at)

    def test_refresh(self):
        uuid = self.fake_node['uuid']
        # Two sequential DB reads with different properties prove refresh()
        # really re-reads the row.
        returns = [dict(self.fake_node, properties={"fake": "first"}),
                   dict(self.fake_node, properties={"fake": "second"})]
        expected = [mock.call(uuid), mock.call(uuid)]
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               side_effect=returns,
                               autospec=True) as mock_get_node:
            n = objects.Node.get(self.context, uuid)
            self.assertEqual({"fake": "first"}, n.properties)
            n.refresh()
            self.assertEqual({"fake": "second"}, n.properties)
            self.assertEqual(expected, mock_get_node.call_args_list)
            self.assertEqual(self.context, n._context)

    def test_save_after_refresh(self):
        # Ensure that it's possible to do object.save() after object.refresh()
        db_node = utils.create_test_node()

        n = objects.Node.get_by_uuid(self.context, db_node.uuid)
        n_copy = objects.Node.get_by_uuid(self.context, db_node.uuid)
        n.name = 'b240'
        n.save()
        n_copy.refresh()
        n_copy.name = 'aaff'
        # Ensure this passes and an exception is not generated
        n_copy.save()

    def test_list(self):
        with mock.patch.object(self.dbapi, 'get_node_list',
                               autospec=True) as mock_get_list:
            mock_get_list.return_value = [self.fake_node]
            nodes = objects.Node.list(self.context)
            self.assertThat(nodes, matchers.HasLength(1))
            self.assertIsInstance(nodes[0], objects.Node)
            self.assertEqual(self.context, nodes[0]._context)

    def test_reserve(self):
        with mock.patch.object(self.dbapi, 'reserve_node',
                               autospec=True) as mock_reserve:
            mock_reserve.return_value = self.fake_node
            node_id = self.fake_node['id']
            fake_tag = 'fake-tag'
            node = objects.Node.reserve(self.context, fake_tag, node_id)
            self.assertIsInstance(node, objects.Node)
            mock_reserve.assert_called_once_with(fake_tag, node_id)
            self.assertEqual(self.context, node._context)

    def test_reserve_node_not_found(self):
        with mock.patch.object(self.dbapi, 'reserve_node',
                               autospec=True) as mock_reserve:
            node_id = 'non-existent'
            mock_reserve.side_effect = exception.NodeNotFound(node=node_id)
            self.assertRaises(exception.NodeNotFound,
                              objects.Node.reserve, self.context, 'fake-tag',
                              node_id)

    def test_release(self):
        with mock.patch.object(self.dbapi, 'release_node',
                               autospec=True) as mock_release:
            node_id = self.fake_node['id']
            fake_tag = 'fake-tag'
            objects.Node.release(self.context, fake_tag, node_id)
            mock_release.assert_called_once_with(fake_tag, node_id)

    def test_release_node_not_found(self):
        with mock.patch.object(self.dbapi, 'release_node',
                               autospec=True) as mock_release:
            node_id = 'non-existent'
            mock_release.side_effect = exception.NodeNotFound(node=node_id)
            self.assertRaises(exception.NodeNotFound,
                              objects.Node.release, self.context,
                              'fake-tag', node_id)

    def test_touch_provisioning(self):
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node
            with mock.patch.object(self.dbapi, 'touch_node_provisioning',
                                   autospec=True) as mock_touch:
                node = objects.Node.get(self.context, self.fake_node['uuid'])
                node.touch_provisioning()
                mock_touch.assert_called_once_with(node.id)

    def test_create(self):
        node = objects.Node(self.context, **self.fake_node)
        node.create()

    def test_create_with_invalid_properties(self):
        node = objects.Node(self.context, **self.fake_node)
        # "5G" is not a valid integer size, so create() must reject it.
        node.properties = {"local_gb": "5G"}
        self.assertRaises(exception.InvalidParameterValue, node.create)

    def test_update_with_invalid_properties(self):
        uuid = self.fake_node['uuid']
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node
            node = objects.Node.get(self.context, uuid)
            node.properties = {"local_gb": "5G", "memory_mb": "5",
                              'cpus': '-1', 'cpu_arch': 'x86_64'}
            # Only the invalid entries should be reported, in order.
            self.assertRaisesRegex(exception.InvalidParameterValue,
                                   ".*local_gb=5G, cpus=-1$", node.save)
            mock_get_node.assert_called_once_with(uuid)

    def test__validate_property_values_success(self):
        uuid = self.fake_node['uuid']
        with mock.patch.object(self.dbapi, 'get_node_by_uuid',
                               autospec=True) as mock_get_node:
            mock_get_node.return_value = self.fake_node
            node = objects.Node.get(self.context, uuid)
            values = self.fake_node
            expect = {
                'cpu_arch': 'x86_64',
                "cpus": '8',
                "local_gb": '10',
                "memory_mb": '4096',
            }
            # Valid properties pass through unchanged.
            node._validate_property_values(values['properties'])
            self.assertEqual(expect, values['properties'])

    def test_payload_schemas(self):
        """Assert that the node's Payload SCHEMAs have the expected properties.

        A payload's SCHEMA should:

        1. Have each of its keys in the payload's fields
        2. Have each member of the schema match with a corresponding field
           in the Node object
        """
        payloads = obj_utils.get_payloads_with_schemas(objects.node)
        for payload in payloads:
            for schema_key in payload.SCHEMA:
                self.assertIn(schema_key, payload.fields,
                              "for %s, schema key %s is not in fields"
                              % (payload, schema_key))
                node_key = payload.SCHEMA[schema_key][1]
                self.assertIn(node_key, objects.Node.fields,
                              "for %s, schema key %s has invalid node field %s"
                              % (payload, schema_key, node_key))
| |
"""
This module defines a container for API requests.
Communication between URL requests and UserMetric
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RequestMeta is a recordtype_ type (a mutable namedtuple) that is
dynamically built at runtime to store API request parameters. The
list REQUEST_META_QUERY_STR contains all the possible query string
variables that may be accepted by a request while REQUEST_META_BASE
defines the URL path meta data (cohort and metric handles). The
factory method RequestMetaFactory is invoked by run.py to build
a RequestMeta object. For example::
rm = RequestMetaFactory("cohort name", "cohort timestamp", "metric")
Finally, a mediator_ pattern via the varMapping namedtuple type is
used to bind the names of URL request variables to corresponding UserMetric
parameter names. The definition of the mapping lives in this module.
The factory method returns the newly built RequestMeta which may then
be populated with parameter values. The ``process_request_params`` method
applies defaults to RequestMeta objects. The run module handles assigning
request values to RequestMeta attributes and coordinating the passage of
this data to UserMetric objects via request_manager_ using the
``process_metrics`` method.
.. _recordtype: http://www.python.org/
.. _mediator: http://en.wikipedia.org/wiki/Mediator_pattern
.. _request_manager: http://www.python.org/
"""
__author__ = "ryan faulkner"
__email__ = "rfaulkner@wikimedia.org"
__date__ = "2013-03-05"
__license__ = "GPL (version 2 or later)"
from user_metrics.utils import format_mediawiki_timestamp, enum
from user_metrics.utils.record_type import recordtype
from user_metrics.api import MetricsAPIError
from user_metrics.api.engine import DEFAULT_QUERY_VAL
from user_metrics.metrics.users import USER_METRIC_PERIOD_TYPE
from collections import namedtuple, OrderedDict
from flask import escape
from user_metrics.config import logging
from user_metrics.utils import unpack_fields
# DEFINE REQUEST META OBJECT, CREATION, AND PROCESSING
# ####################################################

# Wiki project assumed when a request does not name one.
DEFAULT_PROJECT = 'enwiki'

# Default group + structure that maps values in the query string to new ones
DEFAULT_GROUP = 'reg'

# For each RequestMeta attribute listed here (outer key), the raw
# query-string value (inner key) is translated to its internal constant by
# _map_request_values().
REQUEST_VALUE_MAPPING = {
    'group': {
        'reg': USER_METRIC_PERIOD_TYPE.REGISTRATION,
        'activity': USER_METRIC_PERIOD_TYPE.INPUT,
    }
}
def RequestMetaFactory(cohort_expr, cohort_gen_timestamp, metric_expr):
    """
    Dynamically builds a record type given a metric handle.

    All args must be strings representing a cohort, last updated
    timestamp, and metric respectively.

        **cohort_expr** - string. Cohort id from url.
        **cohort_gen_timestamp** - string. Timestamp of last cohort
            update.
        **metric_expr** - string. Metric id from url.

    Returns a new RequestMeta recordtype instance whose base fields are
    populated and whose metric-specific fields default to None.

    Raises MetricsAPIError (error_code=4) when ``metric_expr`` is not a
    known metric handle.
    """
    default_params = 'cohort_expr cohort_gen_timestamp metric '
    try:
        metric_params = ParameterMapping.QUERY_PARAMS_BY_METRIC[metric_expr]
    except KeyError:
        raise MetricsAPIError('Bad metric name.', error_code=4)

    # Extend the base field list with one field per metric-specific
    # query-string variable.
    additional_params = ' '.join(p.query_var for p in metric_params)
    params = default_params + additional_params

    rt = recordtype("RequestMeta", params)
    # Fill the three base fields positionally; metric-specific fields start
    # out as None until request parameters are applied.  (The original built
    # an argument string and eval()'d it -- a direct call is equivalent and
    # avoids eval entirely.)
    return rt(cohort_expr, cohort_gen_timestamp, metric_expr,
              *([None] * len(metric_params)))
# Defines what variables may be extracted from the query string
# (see filter_request_input).
REQUEST_META_QUERY_STR = ['aggregator', 'time_series', 'project', 'namespace',
                          'start', 'end', 'slice', 't', 'n',
                          'time_unit', 'time_unit_count', 'look_ahead',
                          'look_back', 'threshold_type', 'group', 'is_user']

# Defines which variables may be taken from the URL path
REQUEST_META_BASE = ['cohort_expr', 'metric']
def format_request_params(request_meta):
    """
    Formats request data and ensures that it is clean using Flask escape
    functionality.

        Parameters
        ~~~~~~~~~~

        request_meta : recordtype:
            Stores the request data.

    Raises MetricsAPIError (error_code=1) when a supplied start/end
    timestamp cannot be parsed.
    """
    # Handle any datetime fields passed - raise an exception if the
    # formatting is incorrect
    if request_meta.start:
        try:
            request_meta.start = format_mediawiki_timestamp(
                escape(request_meta.start))
        except ValueError:
            # Pass the value of the error code in `error_codes`
            raise MetricsAPIError(error_code=1)

    if request_meta.end:
        try:
            request_meta.end = format_mediawiki_timestamp(
                escape(request_meta.end))
        except ValueError:
            # Pass the value of the error code in `error_codes`
            raise MetricsAPIError(error_code=1)

    if not request_meta.project:
        request_meta.project = DEFAULT_PROJECT

    # Validate the requested group against the known group encodings.
    # BUG FIX: the original tested ``group in REQUEST_VALUE_MAPPING`` -- the
    # *outer* dict, whose only key is 'group' -- which always failed and so
    # forced every request back to DEFAULT_GROUP, making 'activity'
    # unreachable.  Test against the inner value mapping instead.
    if request_meta.group not in REQUEST_VALUE_MAPPING['group']:
        request_meta.group = DEFAULT_GROUP

    # set the aggregator if there is one
    agg_key = get_agg_key(request_meta.aggregator, request_meta.metric)
    request_meta.aggregator = escape(request_meta.aggregator)\
        if agg_key else None

    # @TODO Escape remaining input

    # MAP request values.
    _map_request_values(request_meta)
def _map_request_values(request_meta):
    """
    Map values from the request. Use ``REQUEST_VALUE_MAPPING`` convert
    coded values from the request if a familiar encoding is present.
    Parameters
    ~~~~~~~~~~
    request_meta : recordtype:
        Stores the request data.
    """
    for attr in REQUEST_VALUE_MAPPING:
        # Only mapped attributes that exist on the request are converted.
        if not hasattr(request_meta, attr):
            continue
        request_value = None
        try:
            request_value = getattr(request_meta, attr)
            setattr(request_meta, attr,
                    REQUEST_VALUE_MAPPING[attr][request_value])
        except KeyError:
            # Unknown encoding - log and leave the attribute untouched.
            logging.error(__name__ + ' :: Could not map request value '
                          '{0} for variable {1}.'.
                          format(str(request_value), attr))
def filter_request_input(request, request_meta_obj):
    """
    Filters for relevant request data and sets RequestMeta object.
    Parameters
    ~~~~~~~~~~
    **request_meta_obj** - RequestMeta object to store relevant request
    data
    **request** - Flask request object containing all request data
    """
    if not hasattr(request, 'args'):
        raise MetricsAPIError('Flask request must have "args" attribute.')
    for param in REQUEST_META_QUERY_STR:
        # Only copy query vars that exist both in the request and on the
        # RequestMeta object.
        if param not in request.args or not hasattr(request_meta_obj, param):
            continue
        # An empty query value still marks the var as present.
        value = request.args[param] or DEFAULT_QUERY_VAL
        setattr(request_meta_obj, param, value)
def rebuild_unpacked_request(unpacked_req):
    """
    Takes an unpacked (user_metrics.utils.unpack_fields) RequestMeta object
    and composes a RequestMeta object
    Parameters
    ~~~~~~~~~~
    unpacked_req : dict
        This dictionary contains keys that map to the attributes of the
        ``RequestMeta`` type.

    Raises ``MetricsAPIError`` when any of the required keys
    ('cohort_expr', 'cohort_gen_timestamp', 'metric') are missing.
    """
    try:
        # Build the request item
        rm = RequestMetaFactory(unpacked_req['cohort_expr'],
                                unpacked_req['cohort_gen_timestamp'],
                                unpacked_req['metric'])
        # Populate the request data; falsy values are treated as unset and
        # left at their factory defaults.
        for key in unpacked_req:
            if unpacked_req[key]:
                setattr(rm, key, unpacked_req[key])
        return rm
    except KeyError:
        raise MetricsAPIError(__name__ + ' :: rebuild_unpacked_request - '
                              'Invalid fields.')
# DEFINE MAPPING AMONG API REQUESTS AND METRICS
# #############################################
class ParameterMapping(object):
    """
    Using the **Mediator** model :: Defines the query parameters accepted by
    each metric request. This is a dict keyed on metric that stores a list
    of tuples. Each tuple defines:
        (<name of allowable query string var>, <name of corresponding
        metric param>)
    """
    # Singleton instance
    __instance = None
    def __init__(self):
        """ Initialize the Singleton instance """
        self.__class__.__instance = self
    def __new__(cls):
        """ This class is Singleton, return only one instance """
        if not cls.__instance:
            cls.__instance = super(ParameterMapping, cls).__new__(cls)
        return cls.__instance
    # defines a tuple for mapped variable names:
    # query_var  - name of the query-string variable in the API request
    # metric_var - name of the corresponding metric parameter
    varMapping = namedtuple("VarMapping", "query_var metric_var")
    # Query vars accepted by every metric.
    common_params = [varMapping('start', 'datetime_start'),
                     varMapping('end', 'datetime_end'),
                     varMapping('project', 'project'),
                     varMapping('namespace', 'namespace'),
                     varMapping('slice', 'slice'),
                     varMapping('time_series', 'time_series'),
                     varMapping('aggregator', 'aggregator'),
                     varMapping('t', 't'),
                     varMapping('group', 'group'),
                     varMapping('is_user', 'is_user')]
    # Maps each metric handle to the full list of query vars it accepts;
    # metric-specific vars are appended to ``common_params``.
    QUERY_PARAMS_BY_METRIC = {
        'blocks': common_params,
        'bytes_added': common_params,
        'edit_count': common_params,
        'edit_rate': common_params + [varMapping('time_unit', 'time_unit'),
                                      varMapping('time_unit_count',
                                                 'time_unit_count')],
        'live_account': common_params,
        'namespace_edits': common_params,
        'revert_rate': common_params + [varMapping('look_back', 'look_back'),
                                        varMapping('look_ahead',
                                                   'look_ahead')],
        'survival': common_params,
        'pages_created': common_params,
        'threshold': common_params + [varMapping('n', 'n')],
        'time_to_threshold': common_params +
        [varMapping('threshold_type', 'threshold_type_class')],
    }
    @staticmethod
    def map(request_meta):
        """
        Unpack RequestMeta into dict using MEDIATOR Map parameters from
        API request to metrics call.

        Returns an OrderedDict keyed on metric parameter names, with the
        corresponding values taken from the request.
        """
        args = unpack_fields(request_meta)
        new_args = OrderedDict()
        for mapping in ParameterMapping.\
                QUERY_PARAMS_BY_METRIC[request_meta.metric]:
            new_args[mapping.metric_var] = args[mapping.query_var]
        return new_args
# DEFINE METRIC AND AGGREGATOR ENUMS ALLOWABLE IN REQUESTS
# ########################################################
from user_metrics.metrics.threshold import Threshold, threshold_editors_agg
from user_metrics.metrics.blocks import Blocks, block_rate_agg
from user_metrics.metrics.bytes_added import BytesAdded, ba_median_agg, \
ba_min_agg, ba_max_agg, ba_sum_agg, ba_mean_agg, ba_std_agg
from user_metrics.metrics.survival import Survival, survival_editors_agg
from user_metrics.metrics.revert_rate import RevertRate, revert_rate_avg
from user_metrics.metrics.time_to_threshold import TimeToThreshold, \
ttt_avg_agg, ttt_stats_agg
from user_metrics.metrics.edit_rate import EditRate, edit_rate_agg, \
er_stats_agg
from user_metrics.metrics.namespace_of_edits import NamespaceEdits, \
namespace_edits_sum
from user_metrics.metrics.live_account import LiveAccount, live_accounts_agg
from user_metrics.metrics.pages_created import PagesCreated
# Registered metrics types: maps the public metric handle used in API
# requests to the metric class that implements it.
metric_dict =\
    {
        'threshold': Threshold,
        'survival': Survival,
        'revert_rate': RevertRate,
        'bytes_added': BytesAdded,
        'blocks': Blocks,
        'time_to_threshold': TimeToThreshold,
        'edit_rate': EditRate,
        'namespace_edits': NamespaceEdits,
        'live_account': LiveAccount,
        'pages_created': PagesCreated,
    }
# @TODO: let metric types handle this mapping themselves and obsolete this
# structure
# Registered aggregators: keys have the form
# '<aggregator handle>+<metric handle>' (see ``get_agg_key``).
aggregator_dict =\
    {
        'sum+bytes_added': ba_sum_agg,
        'mean+bytes_added': ba_mean_agg,
        'std+bytes_added': ba_std_agg,
        'sum+namespace_edits': namespace_edits_sum,
        'proportion+threshold': threshold_editors_agg,
        'proportion+survival': survival_editors_agg,
        'proportion+live_account': live_accounts_agg,
        'mean+revert_rate': revert_rate_avg,
        'mean+edit_rate': edit_rate_agg,
        'mean+time_to_threshold': ttt_avg_agg,
        'median+bytes_added': ba_median_agg,
        'min+bytes_added': ba_min_agg,
        'max+bytes_added': ba_max_agg,
        'dist+edit_rate': er_stats_agg,
        'proportion+blocks': block_rate_agg,
        'dist+time_to_threshold': ttt_stats_agg,
        # NOTE(review): pages_created reuses the time-to-threshold stats
        # aggregator here - confirm this is intentional.
        'dist+pages_created': ttt_stats_agg,
    }
def get_metric_type(metric):
    """ Return the metric class registered under handle ``metric``.
        Raises KeyError for unregistered handles. """
    return metric_dict[metric]
def get_aggregator_type(agg):
    """ Return the aggregator method registered under composed key ``agg``
        (see ``get_agg_key``). Raises MetricsAPIError for unknown keys. """
    try:
        return aggregator_dict[agg]
    except KeyError:
        raise MetricsAPIError(__name__ + ' :: Bad aggregator name.')
def get_metric_names():
    """ Returns the names of metric handles as defined by this module """
    return metric_dict.keys()
def get_aggregator_names():
    """ Returns the names of aggregator handles as defined by this module """
    return aggregator_dict.keys()
def get_param_types(metric_handle):
    """ Get the parameters for a given metric handle """
    return metric_dict[metric_handle]()._param_types
def get_agg_key(agg_handle, metric_handle):
    """ Compose the metric dependent aggregator handle.

    Returns '<agg>+<metric>' when that key is registered in
    ``aggregator_dict``, otherwise the empty string. Non-string handles
    (e.g. None) also yield the empty string.
    """
    try:
        key = '+'.join([agg_handle, metric_handle])
    except TypeError:
        # One of the handles was not a string.
        return ''
    return key if key in aggregator_dict else ''
# Define Types of requests handled by the manager
# ###############################################
# Enumeration to store request types; each value doubles as its handle.
request_types = enum(time_series='time_series',
                     aggregator='aggregator',
                     raw='raw')
def get_request_type(request_meta):
    """ Determines request type.

    A time-series request needs an aggregator plus all of the
    time-series fields; an aggregator alone yields an aggregator
    request; anything else is a raw request.
    """
    ts_fields = (request_meta.aggregator, request_meta.time_series,
                 request_meta.group, request_meta.slice,
                 request_meta.start, request_meta.end)
    if all(ts_fields):
        return request_types.time_series
    if request_meta.aggregator:
        return request_types.aggregator
    return request_types.raw
| |
import collections
import numpy as np
import logging
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
from ray.tune.trial import Trial
from ray.tune.error import TuneError
logger = logging.getLogger(__name__)
# Implementation notes:
# This implementation contains 3 logical levels.
# Each HyperBand iteration is a "band". There can be multiple
# bands running at once, and there can be 1 band that is incomplete.
#
# In each band, there are at most `s` + 1 brackets.
# `s` is a value determined by given parameters, and assigned on
# a cyclic basis.
#
# In each bracket, there are at most `n(s)` trials, indicating that
# `n` is a function of `s`. These trials go through a series of
# halving procedures, dropping lowest performers. Multiple
# brackets are running at once.
#
# Trials added will be inserted into the most recent bracket
# and band and will spill over to new brackets/bands accordingly.
#
# This maintains the bracket size and max trial count per band
# to 5 and 117 respectively, which correspond to that of
# `max_attr=81, eta=3` from the blog post. Trials will fill up
# from smallest bracket to largest, with largest
# having the most rounds of successive halving.
class HyperBandScheduler(FIFOScheduler):
    """Implements the HyperBand early stopping algorithm.
    HyperBandScheduler early stops trials using the HyperBand optimization
    algorithm. It divides trials into brackets of varying sizes, and
    periodically early stops low-performing trials within each bracket.
    To use this implementation of HyperBand with Tune, all you need
    to do is specify the max length of time a trial can run `max_t`, the time
    units `time_attr`, the name of the reported objective value `metric`,
    and if `metric` is to be maximized or minimized (`mode`).
    We automatically determine reasonable values for the other
    HyperBand parameters based on the given values.
    For example, to limit trials to 10 minutes and early stop based on the
    `episode_mean_reward` attr, construct:
    ``HyperBand('time_total_s', 'episode_reward_mean', max_t=600)``
    Note that Tune's stopping criteria will be applied in conjunction with
    HyperBand's early stopping mechanisms.
    See also: https://people.eecs.berkeley.edu/~kjamieson/hyperband.html
    Args:
        time_attr (str): The training result attr to use for comparing time.
            Note that you can pass in something non-temporal such as
            `training_iteration` as a measure of progress, the only requirement
            is that the attribute should increase monotonically.
        metric (str): The training result objective value attribute. Stopping
            procedures will use this attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        max_t (int): max time units per trial. Trials will be stopped after
            max_t time units (determined by time_attr) have passed.
            The scheduler will terminate trials after this time has passed.
            Note that this is different from the semantics of `max_t` as
            mentioned in the original HyperBand paper.
        reduction_factor (float): Same as `eta`. Determines how sharp
            the difference is between bracket space-time allocation ratios.
    """
    def __init__(self,
                 time_attr="training_iteration",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 max_t=81,
                 reduction_factor=3):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Backwards compatibility: `reward_attr` implies maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        FIFOScheduler.__init__(self)
        self._eta = reduction_factor
        # Number of brackets per band ("s_max + 1" in HyperBand notation),
        # derived from max_t and eta.
        self._s_max_1 = int(
            np.round(np.log(max_t) / np.log(reduction_factor))) + 1
        self._max_t_attr = max_t
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
        self._hyperbands = [[]]  # list of hyperband iterations
        self._trial_info = {}  # Stores Trial -> Bracket, Band Iteration
        # Tracks state for new trial add
        self._state = {"bracket": None, "band_idx": 0}
        self._num_stopped = 0
        self._metric = metric
        # Sign applied to the metric so all comparisons are maximizations.
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr
    def on_trial_add(self, trial_runner, trial):
        """Adds new trial.
        On a new trial add, if current bracket is not filled,
        add to current bracket. Else, if current band is not filled,
        create new bracket, add to current bracket.
        Else, create new iteration, create new bracket, add to bracket."""
        cur_bracket = self._state["bracket"]
        cur_band = self._hyperbands[self._state["band_idx"]]
        if cur_bracket is None or cur_bracket.filled():
            retry = True
            while retry:
                # if current iteration is filled, create new iteration
                if self._cur_band_filled():
                    cur_band = []
                    self._hyperbands.append(cur_band)
                    self._state["band_idx"] += 1
                # cur_band will always be less than s_max_1 or else filled
                s = len(cur_band)
                assert s < self._s_max_1, "Current band is filled!"
                # Skip brackets whose initial allocation rounds to zero;
                # a None placeholder keeps the band's bracket count.
                if self._get_r0(s) == 0:
                    logger.info("Bracket too small - Retrying...")
                    cur_bracket = None
                else:
                    retry = False
                    cur_bracket = Bracket(self._time_attr, self._get_n0(s),
                                          self._get_r0(s), self._max_t_attr,
                                          self._eta, s)
                cur_band.append(cur_bracket)
                self._state["bracket"] = cur_bracket
        self._state["bracket"].add_trial(trial)
        self._trial_info[trial] = cur_bracket, self._state["band_idx"]
    def _cur_band_filled(self):
        """Checks if the current band is filled.
        The size of the current band should be equal to s_max_1"""
        cur_band = self._hyperbands[self._state["band_idx"]]
        return len(cur_band) == self._s_max_1
    def on_trial_result(self, trial_runner, trial, result):
        """If bracket is finished, all trials will be stopped.
        If a given trial finishes and bracket iteration is not done,
        the trial will be paused and resources will be given up.
        This scheduler will not start trials but will stop trials.
        The current running trial will not be handled,
        as the trialrunner will be given control to handle it."""
        bracket, _ = self._trial_info[trial]
        bracket.update_trial_stats(trial, result)
        # Let the trial run until it reaches the bracket's milestone.
        if bracket.continue_trial(trial):
            return TrialScheduler.CONTINUE
        action = self._process_bracket(trial_runner, bracket)
        return action
    def _process_bracket(self, trial_runner, bracket):
        """This is called whenever a trial makes progress.
        When all live trials in the bracket have no more iterations left,
        Trials will be successively halved. If bracket is done, all
        non-running trials will be stopped and cleaned up,
        and during each halving phase, bad trials will be stopped while good
        trials will return to "PENDING"."""
        action = TrialScheduler.PAUSE
        if bracket.cur_iter_done():
            if bracket.finished():
                bracket.cleanup_full(trial_runner)
                return TrialScheduler.STOP
            good, bad = bracket.successive_halving(self._metric,
                                                   self._metric_op)
            # kill bad trials
            self._num_stopped += len(bad)
            for t in bad:
                if t.status == Trial.PAUSED:
                    trial_runner.stop_trial(t)
                elif t.status == Trial.RUNNING:
                    bracket.cleanup_trial(t)
                    action = TrialScheduler.STOP
                else:
                    raise TuneError("Trial with unexpected status encountered")
            # ready the good trials - if trial is too far ahead, don't continue
            for t in good:
                if t.status not in [Trial.PAUSED, Trial.RUNNING]:
                    raise TuneError("Trial with unexpected status encountered")
                if bracket.continue_trial(t):
                    if t.status == Trial.PAUSED:
                        self._unpause_trial(trial_runner, t)
                    elif t.status == Trial.RUNNING:
                        action = TrialScheduler.CONTINUE
        return action
    def on_trial_remove(self, trial_runner, trial):
        """Notification when trial terminates.
        Trial info is removed from bracket. Triggers halving if bracket is
        not finished."""
        bracket, _ = self._trial_info[trial]
        bracket.cleanup_trial(trial)
        if not bracket.finished():
            self._process_bracket(trial_runner, bracket)
    def on_trial_complete(self, trial_runner, trial, result):
        """Cleans up trial info from bracket if trial completed early."""
        self.on_trial_remove(trial_runner, trial)
    def on_trial_error(self, trial_runner, trial):
        """Cleans up trial info from bracket if trial errored early."""
        self.on_trial_remove(trial_runner, trial)
    def choose_trial_to_run(self, trial_runner):
        """Fair scheduling within iteration by completion percentage.
        List of trials not used since all trials are tracked as state
        of scheduler. If iteration is occupied (ie, no trials to run),
        then look into next iteration.
        """
        for hyperband in self._hyperbands:
            # band will have None entries if no resources
            # are to be allocated to that bracket.
            scrubbed = [b for b in hyperband if b is not None]
            # Least-complete brackets get first pick of resources.
            for bracket in sorted(
                    scrubbed, key=lambda b: b.completion_percentage()):
                for trial in bracket.current_trials():
                    if (trial.status == Trial.PENDING
                            and trial_runner.has_resources(trial.resources)):
                        return trial
        return None
    def debug_string(self):
        """This provides a progress notification for the algorithm.
        For each bracket, the algorithm will output a string as follows:
            Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
            {PENDING: 2, RUNNING: 3, TERMINATED: 2}
        "Max Size" indicates the max number of pending/running experiments
        set according to the Hyperband algorithm.
        "Milestone" indicates the iterations a trial will run for before
        the next halving will occur.
        "Completed" indicates an approximate progress metric. Some brackets,
        like ones that are unfilled, will not reach 100%.
        """
        out = "Using HyperBand: "
        out += "num_stopped={} total_brackets={}".format(
            self._num_stopped, sum(len(band) for band in self._hyperbands))
        for i, band in enumerate(self._hyperbands):
            out += "\nRound #{}:".format(i)
            for bracket in band:
                out += "\n  {}".format(bracket)
        return out
    def state(self):
        """Returns a summary dict of scheduler state for reporting."""
        return {
            "num_brackets": sum(len(band) for band in self._hyperbands),
            "num_stopped": self._num_stopped
        }
    def _unpause_trial(self, trial_runner, trial):
        """Sets a PAUSED trial back to PENDING via the trial executor."""
        trial_runner.trial_executor.unpause_trial(trial)
class Bracket:
    """Logical object for tracking Hyperband bracket progress. Keeps track
    of proper parameters as designated by HyperBand.
    Also keeps track of progress to ensure good scheduling.
    """
    def __init__(self, time_attr, max_trials, init_t_attr, max_t_attr, eta, s):
        self._live_trials = {}  # maps trial -> current result (None at first)
        self._all_trials = []
        self._time_attr = time_attr  # result attribute used to measure time
        # Current / initial bracket size.
        self._n = self._n0 = max_trials
        # Current / initial per-round time allocation.
        self._r = self._r0 = init_t_attr
        self._max_t_attr = max_t_attr
        # Cumulative milestone: a trial continues until its time attribute
        # reaches this value, at which point the next halving triggers.
        self._cumul_r = self._r0
        self._eta = eta
        self._halves = s  # number of successive halvings remaining
        self._total_work = self._calculate_total_work(self._n0, self._r0, s)
        self._completed_progress = 0
    def add_trial(self, trial):
        """Add trial to bracket assuming bracket is not filled.
        At a later iteration, a newly added trial will be given equal
        opportunity to catch up."""
        assert not self.filled(), "Cannot add trial to filled bracket!"
        self._live_trials[trial] = None
        self._all_trials.append(trial)
    def cur_iter_done(self):
        """Checks if all iterations have completed.
        TODO(rliaw): also check that `t.iterations == self._r`"""
        return all(
            self._get_result_time(result) >= self._cumul_r
            for result in self._live_trials.values())
    def finished(self):
        """Bracket is finished when no halvings remain and the final
        round's milestone has been reached by all live trials."""
        return self._halves == 0 and self.cur_iter_done()
    def current_trials(self):
        """Returns the list of live (not yet cleaned up) trials."""
        return list(self._live_trials)
    def continue_trial(self, trial):
        """Returns True while the trial has not yet hit the milestone."""
        result = self._live_trials[trial]
        return self._get_result_time(result) < self._cumul_r
    def filled(self):
        """Checks if bracket is filled.
        Only let new trials be added at current level minimizing the need
        to backtrack and bookkeep previous medians."""
        return len(self._live_trials) == self._n
    def successive_halving(self, metric, metric_op):
        """Shrink the bracket and raise the milestone for the next round.

        Returns ``(good, bad)``: the top ``ceil(n / eta)`` trials to keep
        and the rest, ranked by ``metric_op * result[metric]``.
        """
        assert self._halves > 0
        self._halves -= 1
        self._n /= self._eta
        self._n = int(np.ceil(self._n))
        self._r *= self._eta
        # Cap the next round so total time never exceeds max_t_attr.
        self._r = int(min(self._r, self._max_t_attr - self._cumul_r))
        # BUG FIX: the milestone is cumulative total time. This previously
        # assigned `self._cumul_r = self._r`, which dropped time already
        # trained and broke the max_t_attr cap computed above.
        self._cumul_r += self._r
        sorted_trials = sorted(
            self._live_trials,
            key=lambda t: metric_op * self._live_trials[t][metric])
        good, bad = sorted_trials[-self._n:], sorted_trials[:-self._n]
        return good, bad
    def update_trial_stats(self, trial, result):
        """Update result for trial. Called after trial has finished
        an iteration - will decrement iteration count.
        TODO(rliaw): The other alternative is to keep the trials
        in and make sure they're not set as pending later."""
        assert trial in self._live_trials
        assert self._get_result_time(result) >= 0
        # Progress is the additional time accrued since the last result.
        delta = self._get_result_time(result) - \
            self._get_result_time(self._live_trials[trial])
        assert delta >= 0
        self._completed_progress += delta
        self._live_trials[trial] = result
    def cleanup_trial(self, trial):
        """Clean up statistics tracking for terminated trials (either by force
        or otherwise).
        This may cause bad trials to continue for a long time, in the case
        where all the good trials finish early and there are only bad trials
        left in a bracket with a large max-iteration."""
        assert trial in self._live_trials
        del self._live_trials[trial]
    def cleanup_full(self, trial_runner):
        """Cleans up bracket after bracket is completely finished.
        Lets the last trial continue to run until termination condition
        kicks in."""
        for trial in self.current_trials():
            if (trial.status == Trial.PAUSED):
                trial_runner.stop_trial(trial)
    def completion_percentage(self):
        """Returns a progress metric.
        This will not be always finish with 100 since dead trials
        are dropped."""
        if self.finished():
            return 1.0
        return self._completed_progress / self._total_work
    def _get_result_time(self, result):
        """Time attribute of a result; None (no result yet) counts as 0."""
        if result is None:
            return 0
        return result[self._time_attr]
    def _calculate_total_work(self, n, r, s):
        """Estimate total trial-time units this bracket will consume."""
        work = 0
        cumulative_r = r
        for i in range(s + 1):
            work += int(n) * int(r)
            n /= self._eta
            n = int(np.ceil(n))
            r *= self._eta
            r = int(min(r, self._max_t_attr - cumulative_r))
        return work
    def __repr__(self):
        status = ", ".join([
            "Max Size (n)={}".format(self._n),
            "Milestone (r)={}".format(self._cumul_r),
            "completed={:.1%}".format(self.completion_percentage())
        ])
        counts = collections.Counter([t.status for t in self._all_trials])
        trial_statuses = ", ".join(
            sorted("{}: {}".format(k, v) for k, v in counts.items()))
        return "Bracket({}): {{{}}} ".format(status, trial_statuses)
| |
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for ESX Networking.
"""
from oslo_log import log as logging
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.i18n import _
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
def _get_network_obj(session, network_objects, network_name):
    """Gets the network object for the requested network.
    The network object will be used when creating the VM configuration
    spec. The network object contains the relevant network details for
    the specific network type, for example, a distributed port group.
    The method will search for the network_name in the list of
    network_objects.
    :param session: vCenter soap session
    :param network_objects: group of networks
    :param network_name: the requested network
    :return: network object dict ('type' plus type-specific keys), or an
        empty dict when no match is found
    """
    network_obj = {}
    # network_objects is actually a RetrieveResult object from vSphere API call
    for obj_content in network_objects:
        # the propset attribute "need not be set" by returning API
        if not hasattr(obj_content, 'propSet'):
            continue
        prop_dict = vm_util.propset_dict(obj_content.propSet)
        network_refs = prop_dict.get('network')
        if network_refs:
            network_refs = network_refs.ManagedObjectReference
            for network in network_refs:
                # Get network properties
                if network._type == 'DistributedVirtualPortgroup':
                    props = session._call_method(vutil,
                                                 "get_object_property",
                                                 network,
                                                 "config")
                    # NOTE(asomya): This only works on ESXi if the port binding
                    # is set to ephemeral
                    # For a VLAN the network name will be the UUID. For a VXLAN
                    # network this will have a VXLAN prefix and then the
                    # network name.
                    # Substring match on purpose (prefix handling above).
                    if network_name in props.name:
                        network_obj['type'] = 'DistributedVirtualPortgroup'
                        network_obj['dvpg'] = props.key
                        dvs_props = session._call_method(vutil,
                                                         "get_object_property",
                                                         props.distributedVirtualSwitch,
                                                         "uuid")
                        network_obj['dvsw'] = dvs_props
                        return network_obj
                else:
                    # Standard (non-distributed) network: exact name match.
                    props = session._call_method(vutil,
                                                 "get_object_property",
                                                 network,
                                                 "summary.name")
                    if props == network_name:
                        network_obj['type'] = 'Network'
                        network_obj['name'] = network_name
                        return network_obj
def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
    """Gets reference to the network whose name is passed as the
    argument.

    Returns the network object dict from ``_get_network_obj``, or None
    (implicitly) when no matching network exists on the cluster.
    """
    vm_networks = session._call_method(vim_util,
                                       'get_object_properties',
                                       None, cluster,
                                       'ClusterComputeResource', ['network'])
    # Results are paged: keep consuming pages until a match is found or
    # the retrieval is exhausted.
    while vm_networks:
        if vm_networks.objects:
            network_obj = _get_network_obj(session, vm_networks.objects,
                                           network_name)
            if network_obj:
                # Release the server-side retrieval before returning early.
                session._call_method(vutil, 'cancel_retrieval',
                                     vm_networks)
                return network_obj
        vm_networks = session._call_method(vutil, 'continue_retrieval',
                                           vm_networks)
    LOG.debug("Network %s not found on cluster!", network_name)
def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
    """Gets the vswitch associated with the physical network adapter
    with the name supplied.

    Returns the vSwitch name, or None (implicitly) when no vSwitch is
    associated with the given adapter.
    """
    # Get the list of vSwicthes on the Host System
    host_mor = vm_util.get_host_ref(session, cluster)
    vswitches_ret = session._call_method(vutil,
                                         "get_object_property",
                                         host_mor,
                                         "config.network.vswitch")
    # Meaning there are no vSwitches on the host. Shouldn't be the case,
    # but just doing code check
    if not vswitches_ret:
        return
    vswitches = vswitches_ret.HostVirtualSwitch
    # Get the vSwitch associated with the network adapter
    for elem in vswitches:
        try:
            for nic_elem in elem.pnic:
                # pnic entries are keys; the device name is the last
                # '-'-separated token (assumed format - TODO confirm).
                if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
                    return elem.name
        # Catching Attribute error as a vSwitch may not be associated with a
        # physical NIC.
        except AttributeError:
            pass
def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
    """Checks if the vlan_interface exists on the esx host."""
    host_ref = vm_util.get_host_ref(session, cluster)
    pnics_ret = session._call_method(vutil,
                                     "get_object_property",
                                     host_ref,
                                     "config.network.pnic")
    # No physical NICs reported for this host.
    if not pnics_ret:
        return False
    # Match the requested interface against each NIC's device name.
    return any(pnic.device == vlan_interface
               for pnic in pnics_ret.PhysicalNic)
def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
    """Get the vlan id and vswitch associated with the port group."""
    host_ref = vm_util.get_host_ref(session, cluster)
    pg_ret = session._call_method(vutil,
                                  "get_object_property",
                                  host_ref,
                                  "config.network.portgroup")
    if not pg_ret:
        msg = _("ESX SOAP server returned an empty port group "
                "for the host system in its response")
        LOG.error(msg)
        raise exception.NovaException(msg)
    # Scan the host's port groups for the requested name.
    for port_group in pg_ret.HostPortGroup:
        spec = port_group.spec
        if spec.name == pg_name:
            return spec.vlanId, spec.vswitchName
    return None, None
def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
    """Creates a port group on the host system with the vlan tags
    supplied. VLAN id 0 means no vlan id association.
    """
    client_factory = session.vim.client.factory
    add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
        client_factory,
        vswitch_name,
        pg_name,
        vlan_id)
    host_mor = vm_util.get_host_ref(session, cluster)
    network_system_mor = session._call_method(vutil,
                                              "get_object_property",
                                              host_mor,
                                              "configManager.networkSystem")
    LOG.debug("Creating Port Group with name %s on "
              "the ESX host", pg_name)
    try:
        session._call_method(session.vim,
                             "AddPortGroup", network_system_mor,
                             portgrp=add_prt_grp_spec)
    except vexc.AlreadyExistsException:
        # There can be a race condition when two instances try
        # adding port groups at the same time. One succeeds, then
        # the other one will get an exception. Since we are
        # concerned with the port group being created, which is done
        # by the other call, we can ignore the exception.
        LOG.debug("Port Group %s already exists.", pg_name)
    # Logged even when the group already existed - success either way.
    LOG.debug("Created Port Group with name %s on "
              "the ESX host", pg_name)
| |
import os
import re
from itertools import repeat
from sanic.blueprints import Blueprint
from sanic.response import json, redirect
from sanic.views import CompositionView
from .doc import RouteSpec, definitions
from .doc import route as doc_route
from .doc import route_specs, serialize_schema
from .spec import Spec
# Blueprint serving the swagger UI and generated spec under /swagger.
swagger_blueprint = Blueprint("swagger", url_prefix="/swagger")
# Absolute path to the bundled swagger UI assets (<package dir>/ui).
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.abspath(dir_path + "/ui")
# Redirect "/swagger" to "/swagger/"
@swagger_blueprint.route("", strict_slashes=True)
def index(request):
    # The trailing-slash URL serves the static UI entry point.
    return redirect("{}/".format(swagger_blueprint.url_prefix))
# Serve the UI entry point at "/swagger/" and the remaining static
# assets (JS/CSS) under the same prefix.
swagger_blueprint.static("/", dir_path + "/index.html", strict_slashes=True)
swagger_blueprint.static("/", dir_path)
def get_uri_filter(app):
    """
    Return a filter function that takes a URI and returns whether it should
    be filter out from the swagger documentation or not.
    Arguments:
        app: The application to take `config.API_URI_FILTER` from. Possible
            values for this config option are: `slash` (to keep URIs that
            end with a `/`), `all` (to keep all URIs). All other values
            default to keep all URIs that don't end with a `/`.
    Returns:
        `True` if the URI should be *filtered out* from the swagger
        documentation, and `False` if it should be kept in the documentation.
    """
    mode = getattr(app.config, "API_URI_FILTER", None)
    if mode == "slash":
        # Keep only URIs that end with a /.
        def _filter(uri):
            return not uri.endswith("/")
    elif mode == "all":
        # Keep every URI.
        def _filter(uri):
            return False
    else:
        # Default: keep URIs without a trailing /, plus the root "/".
        def _filter(uri):
            return len(uri) > 1 and uri.endswith("/")
    return _filter
def remove_nulls(dictionary, deep=True):
    """
    Return a copy of ``dictionary`` with all ``None`` values removed.

    Arguments:
        dictionary: The mapping to clean; it is not modified in place.
        deep: When True (default), recurse into nested dict values and
            clean them as well.
    """
    return {
        # isinstance (rather than `type(v) is dict`) so dict subclasses
        # such as OrderedDict are also cleaned recursively.
        key: remove_nulls(value, deep) if deep and isinstance(value, dict)
        else value
        for key, value in dictionary.items()
        if value is not None
    }
@swagger_blueprint.listener("after_server_start")
def build_spec(app, loop):
    """Build the swagger spec for *app* once the server has started.

    Walks every registered route (skipping the swagger blueprint itself,
    static routes, and URIs rejected by the configured URI filter),
    collects method/parameter/response metadata from the decorated route
    specs, and stores the finished Spec on ``swagger_blueprint._spec``.
    """
    _spec = Spec(app=app)

    # --------------------------------------------------------------- #
    # Blueprint Tags
    # --------------------------------------------------------------- #
    # Attach each route spec to its blueprint and default its tag list
    # to the blueprint name.
    for blueprint in app.blueprints.values():
        if hasattr(blueprint, "routes"):
            for route in blueprint.routes:
                if hasattr(route.handler, "view_class"):
                    # class based view: one spec per HTTP-method handler
                    view = route.handler.view_class
                    for http_method in route.methods:
                        _handler = getattr(view, http_method.lower(), None)
                        if _handler:
                            route_spec = route_specs[_handler]
                            route_spec.blueprint = blueprint
                            if not route_spec.tags:
                                route_spec.tags.append(blueprint.name)
                else:
                    route_spec = route_specs[route.handler]
                    route_spec.blueprint = blueprint
                    if not route_spec.tags:
                        route_spec.tags.append(blueprint.name)

    paths = {}
    uri_filter = get_uri_filter(app)

    for uri, route in app.router.routes_all.items():
        # Ignore routes under swagger blueprint
        if route.uri.startswith(swagger_blueprint.url_prefix):
            continue

        # Apply the URI filter
        if uri_filter(uri):
            continue

        # route.name will be None when using class based view
        if route.name and "static" in route.name:
            # TODO: add static flag in sanic routes
            continue

        # --------------------------------------------------------------- #
        # Methods
        # --------------------------------------------------------------- #
        # Build list of methods and their handler functions
        handler_type = type(route.handler)
        if handler_type is CompositionView:
            view = route.handler
            method_handlers = view.handlers.items()
        else:
            method_handlers = zip(route.methods, repeat(route.handler))

        methods = {}
        for _method, _handler in method_handlers:
            if hasattr(_handler, "view_class"):
                view_handler = getattr(_handler.view_class, _method.lower())
                route_spec = route_specs.get(view_handler) or RouteSpec()
            else:
                route_spec = route_specs.get(_handler) or RouteSpec()

            if _method == "OPTIONS" or route_spec.exclude:
                continue

            api_consumes_content_types = getattr(
                app.config, "API_CONSUMES_CONTENT_TYPES", ["application/json"]
            )
            consumes_content_types = (
                route_spec.consumes_content_type or api_consumes_content_types
            )

            api_produces_content_types = getattr(
                app.config, "API_PRODUCES_CONTENT_TYPES", ["application/json"]
            )
            produces_content_types = (
                route_spec.produces_content_type or api_produces_content_types
            )

            # Parameters - Path & Query String
            route_parameters = []
            for parameter in route.parameters:
                route_parameters.append(
                    {
                        **serialize_schema(parameter.cast),
                        "required": True,
                        "in": "path",
                        "name": parameter.name,
                    }
                )

            for consumer in route_spec.consumes:
                spec = serialize_schema(consumer.field)
                if "properties" in spec:
                    # Object consumer: one parameter per property.
                    for name, prop_spec in spec["properties"].items():
                        route_param = {
                            **prop_spec,
                            "required": consumer.required,
                            "in": consumer.location,
                            "name": name,
                        }
                else:
                    route_param = {
                        **spec,
                        "required": consumer.required,
                        "in": consumer.location,
                        "name": consumer.field.name
                        if not isinstance(consumer.field, type)
                        and hasattr(consumer.field, "name")
                        else "body",
                    }

                # Swagger 2.0 wants "$ref" nested under "schema" for params.
                if "$ref" in route_param:
                    route_param["schema"] = {"$ref": route_param["$ref"]}
                    del route_param["$ref"]

                route_parameters.append(route_param)

            responses = {}

            if len(route_spec.response) == 0:
                # No explicit responses declared: synthesize a "200".
                responses["200"] = {
                    "schema": serialize_schema(route_spec.produces.field)
                    if route_spec.produces
                    else None,
                    "description": route_spec.produces.description
                    if route_spec.produces
                    else None,
                }

            for (status_code, routefield) in route_spec.response:
                responses["{}".format(status_code)] = {
                    "schema": serialize_schema(routefield.field),
                    "description": routefield.description,
                }

            endpoint = remove_nulls(
                {
                    "operationId": route_spec.operation or route.name,
                    "summary": route_spec.summary,
                    "description": route_spec.description,
                    "consumes": consumes_content_types,
                    "produces": produces_content_types,
                    "tags": route_spec.tags or None,
                    "parameters": route_parameters,
                    "responses": responses,
                }
            )

            methods[_method.lower()] = endpoint

        uri_parsed = uri
        for parameter in route.parameters:
            # Rewrite sanic-style "<param:type>" segments as "{param}".
            uri_parsed = re.sub(
                "<" + parameter.name + ".*?>", "{" + parameter.name + "}", uri_parsed
            )

        if methods:
            paths[uri_parsed] = methods

    # --------------------------------------------------------------- #
    # Definitions
    # --------------------------------------------------------------- #
    _spec.add_definitions(
        definitions={
            obj.object_name: definition for obj, definition in definitions.values()
        }
    )

    # --------------------------------------------------------------- #
    # Tags
    # --------------------------------------------------------------- #
    # TODO: figure out how to get descriptions in these
    tags = {}
    for route_spec in route_specs.values():
        # BUGFIX: use a one-element tuple. `name in ("swagger")` tested
        # substring membership in the *string* "swagger", so blueprint
        # names such as "swag" were also (incorrectly) skipped.
        if route_spec.blueprint and route_spec.blueprint.name in ("swagger",):
            # TODO: add static flag in sanic routes
            continue
        for tag in route_spec.tags:
            tags[tag] = True
    _spec.add_tags(tags=[{"name": name} for name in tags.keys()])

    _spec.add_paths(paths)
    swagger_blueprint._spec = _spec
@swagger_blueprint.route("/swagger.json")
@doc_route(exclude=True)
def spec(request):
    # Serve the spec that build_spec() assembled at server start.
    payload = swagger_blueprint._spec.as_dict
    return json(payload)
@swagger_blueprint.route("/swagger-config")
def config(request):
    # Expose the optional UI configuration; an empty object when unset.
    app_config = request.app.config
    if hasattr(app_config, "SWAGGER_UI_CONFIGURATION"):
        return json(getattr(app_config, "SWAGGER_UI_CONFIGURATION"))
    return json({})
| |
#-----------------------------------------------------------------
# _ast_gen.py
#
# Generates the AST Node classes from a specification given in
# a .yaml file
#
# The design of this module was inspired by astgen.py from the
# Python 2.5 code-base.
#
# Copyright (C) 2008-2011, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
import pprint
from string import Template
class ASTCodeGenerator(object):
    """ Generates Python source for AST node classes from a node
        configuration file.
    """
    def __init__(self, cfg_filename='_c_ast.cfg'):
        """ Initialize the code generator from a configuration
            file.
        """
        self.cfg_filename = cfg_filename
        self.node_cfg = [
            NodeCfg(name, contents)
            for name, contents in self.parse_cfgfile(cfg_filename)
        ]

    def generate(self, file=None):
        """ Generates the code into file, an open file buffer.
        """
        chunks = [Template(_PROLOGUE_COMMENT).substitute(
            cfg_filename=self.cfg_filename)]
        chunks.append(_PROLOGUE_CODE)
        for node_cfg in self.node_cfg:
            chunks.append(node_cfg.generate_source() + '\n\n')
        file.write(''.join(chunks))

    def parse_cfgfile(self, filename):
        """ Yield (name, contents) pairs, one per node line in the
            configuration file. Blank lines and '#' comments are skipped.
        """
        with open(filename, "r") as f:
            for raw_line in f:
                line = raw_line.strip()
                if not line or line.startswith('#'):
                    continue
                colon_i = line.find(':')
                lbracket_i = line.find('[')
                rbracket_i = line.find(']')
                # A valid line looks like:  Name: [entry, entry, ...]
                if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i:
                    raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line))
                name = line[:colon_i]
                val = line[lbracket_i + 1:rbracket_i]
                vallist = [v.strip() for v in val.split(',')] if val else []
                yield name, vallist
class NodeCfg(object):
    """ Node configuration.

        name: node name
        contents: list of entries; a '**' suffix marks a sequence child,
                  a single '*' a child node, and no suffix an attribute.
    """
    def __init__(self, name, contents):
        self.name = name
        self.all_entries = []
        self.attr = []
        self.child = []
        self.seq_child = []
        for entry in contents:
            stripped = entry.rstrip('*')
            self.all_entries.append(stripped)
            star_count = len(entry) - len(stripped)
            if star_count >= 2:
                self.seq_child.append(stripped)
            elif star_count == 1:
                self.child.append(stripped)
            else:
                self.attr.append(entry)

    def generate_source(self):
        """ Full class source: __init__, children() and attr_names. """
        return '\n'.join([
            self._gen_init(),
            self._gen_children(),
            self._gen_attr_names(),
        ])

    def _gen_init(self):
        """ Source for the node class header and its __init__. """
        lines = ["class %s(Node):\n" % self.name]
        if self.all_entries:
            arglist = '(self, %s, coord=None)' % ', '.join(self.all_entries)
        else:
            arglist = '(self, coord=None)'
        lines.append("    def __init__%s:\n" % arglist)
        for name in self.all_entries + ['coord']:
            lines.append("        self.%s = %s\n" % (name, name))
        return ''.join(lines)

    def _gen_children(self):
        """ Source for the node's children() method. """
        src = '    def children(self):\n'
        if not self.all_entries:
            return src + '        return ()\n'
        src += '        nodelist = []\n'
        for child in self.child:
            src += (
                '        if self.%(child)s is not None:' +
                ' nodelist.append(("%(child)s", self.%(child)s))\n') % (
                    dict(child=child))
        for seq_child in self.seq_child:
            src += (
                '        for i, child in enumerate(self.%(child)s or []):\n'
                '            nodelist.append(("%(child)s[%%d]" %% i, child))\n') % (
                    dict(child=seq_child))
        src += '        return tuple(nodelist)\n'
        return src

    def _gen_attr_names(self):
        """ Source for the attr_names tuple. """
        return "    attr_names = (" + ''.join("%r," % nm for nm in self.attr) + ')'
_PROLOGUE_COMMENT = \
r'''#-----------------------------------------------------------------
# ** ATTENTION **
# This code was automatically generated from the file:
# $cfg_filename
#
# Do not modify it directly. Modify the configuration file and
# run the generator again.
# ** ** *** ** **
#
# pycparser: c_ast.py
#
# AST Node classes.
#
# Copyright (C) 2008-2011, Eli Bendersky
# License: BSD
#-----------------------------------------------------------------
'''
_PROLOGUE_CODE = r'''
import sys
class Node(object):
""" Abstract base class for AST nodes.
"""
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
Do you want the coordinates of each Node to be
displayed.
"""
lead = ' ' * offset
if nodenames and _my_node_name is not None:
buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ')
else:
buf.write(lead + self.__class__.__name__+ ': ')
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
buf.write(attrstr)
if showcoord:
buf.write(' (at %s)' % self.coord)
buf.write('\n')
for (child_name, child) in self.children():
child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c_name, c in node.children():
self.visit(c)
'''
if __name__ == "__main__":
    # Manual entry point: regenerate c_ast.py from the default config.
    ast_gen = ASTCodeGenerator('_c_ast.cfg')
    # Use a context manager so the output file is flushed and closed even
    # when generation fails (the original leaked the file handle).
    with open('c_ast.py', 'w') as outfile:
        ast_gen.generate(outfile)
| |
from __future__ import division
import os
import sys
import re
import getopt
import math
def usage():
    """Print command-line usage for gnwd.py (new-word discovery)."""
    print('gnwd.py Input Output [options]\n\
options:\n\
-c CountThresholdPercent,(0,1),default=0.5\n\
-C CountThreshold\n\
-b BindThresholdPercent,(0,1),default=0.5\n\
-B BindThreshold\n\
-e EntropyThresholdPercent,(0,1),default=0.5\n\
-E EntropyThreshold\n\
-d whether output debug file,default=False')
def threshold_cal(lst, thres_percent):
    """Return the value at the thres_percent quantile position of lst.

    The list is sorted ascending and the element at index
    floor(len(lst) * thres_percent) is returned.
    """
    ordered = sorted(lst)
    position = math.floor(len(ordered) * thres_percent)
    return ordered[position]
def candi_word_gen(sen, sen_len, nword):
    """Return every contiguous substring of length nword in sen.

    sen_len is the pre-computed length of sen; it is kept as a parameter
    to preserve the original call signature.
    """
    return [sen[start:start + nword] for start in range(sen_len - nword + 1)]
def update_count_dict(dict, sen, nword):
    """Accumulate the nword-gram counts of sentence *sen* into *dict*.

    Args:
        dict: mapping n-gram -> running count, updated in place. (The
              parameter name shadows the builtin; kept for interface
              compatibility with existing callers.)
        sen: the sentence (string) to scan.
        nword: length of the n-grams to count.
    """
    windows = [sen[i:i + nword] for i in range(len(sen) - nword + 1)]
    # dict.get avoids the separate membership test + double lookup the
    # original performed for existing keys.
    for word in set(windows):
        dict[word] = dict.get(word, 0) + windows.count(word)
def bind_cal(count, count_part1, count_part2, TOTAL_WORD_NUM):
    """PMI-style cohesion: N * count(whole) / (count(part1) * count(part2))."""
    numerator = TOTAL_WORD_NUM * count
    denominator = count_part1 * count_part2
    return numerator / denominator
def update_w2_bind_dict(dict_w1, dict_w2, TOTAL_WORD_NUM):
    """Replace each 2-gram count in dict_w2 with [count, cohesion].

    The cohesion score is computed from the two single-character counts
    found in dict_w1.
    """
    for word, count in dict_w2.items():
        first_count = dict_w1[word[0]]
        second_count = dict_w1[word[1]]
        cohesion = bind_cal(count, first_count, second_count, TOTAL_WORD_NUM)
        dict_w2[word] = [count, cohesion]
def update_w3_bind_dict(dict_w1, dict_w2, dict_w3, TOTAL_WORD_NUM):
    """Replace each 3-gram count in dict_w3 with [count, cohesion].

    Cohesion is the weaker (minimum) of the two possible binary splits:
    (w[0] | w[1:]) and (w[:-1] | w[-1]).
    """
    for word, count in dict_w3.items():
        split_head = bind_cal(
            count, dict_w1[word[0]], dict_w2[word[1:]][0], TOTAL_WORD_NUM)
        split_tail = bind_cal(
            count, dict_w1[word[-1]], dict_w2[word[0:-1]][0], TOTAL_WORD_NUM)
        dict_w3[word] = [count, min(split_head, split_tail)]
def update_w4_bind_dict(dict_w1, dict_w2, dict_w3, dict_w4, TOTAL_WORD_NUM):
    """Replace each 4-gram count in dict_w4 with [count, cohesion].

    Cohesion is the minimum over the three binary splits of the 4-gram:
    1+3, 2+2 and 3+1 characters.
    """
    for word, count in dict_w4.items():
        splits = (
            bind_cal(count, dict_w1[word[0]], dict_w3[word[1:]][0],
                     TOTAL_WORD_NUM),
            bind_cal(count, dict_w2[word[0:2]][0], dict_w2[word[2:]][0],
                     TOTAL_WORD_NUM),
            bind_cal(count, dict_w3[word[0:3]][0], dict_w1[word[-1]],
                     TOTAL_WORD_NUM),
        )
        dict_w4[word] = [count, min(splits)]
def ent_cal(lst):
    """Shannon entropy (in bits) of the count distribution in lst."""
    total = sum(lst)
    entropy = 0
    for count in lst:
        probability = count / total
        entropy -= probability * math.log(probability, 2)
    return entropy
def update_ent_dict(dict1, dict2, nword):
    """Attach boundary entropy to every (n-1)-gram in dict1.

    dict2 maps n-grams (length nword) to a count (or [count, ...] list);
    dict1 maps the (n-1)-gram prefixes to [count, bind] lists. Two passes
    are made over the sorted n-grams: the first groups them by shared
    prefix and appends the entropy of the following character (right
    entropy) as info[2]; the second reverses the words to compute the
    preceding-character (left) entropy and keeps the *minimum* of the two
    as the final info[2]. Prefixes never seen get entropy 0.
    """
    words_lst = sorted(dict2.keys())
    words_num = len(words_lst)
    LstForCalEnt=[]
    # Sentinel guarantees the last real group is flushed by the loop.
    words_lst.append('*' * nword)
    for i in range(1, words_num + 1):
        if words_lst[i][:-1] == words_lst[i-1][:-1]:
            # Same prefix group: accumulate this word's count.
            if type(dict2[words_lst[i-1]]) == list:
                LstForCalEnt.append(dict2[words_lst[i-1]][0])
            else:
                LstForCalEnt.append(dict2[words_lst[i-1]])
        else:
            # Prefix changed: include the last member, then flush the
            # group's entropy onto its (n-1)-gram prefix in dict1.
            if type(dict2[words_lst[i-1]]) == list:
                LstForCalEnt.append(dict2[words_lst[i-1]][0])
            else:
                LstForCalEnt.append(dict2[words_lst[i-1]])
            ent = ent_cal(LstForCalEnt)
            info = dict1[words_lst[i-1][:-1]]
            info.append(ent)
            dict1[words_lst[i-1][:-1]] = info
            LstForCalEnt = []
    words_lst.pop()
    # Second pass: reverse every word so sorting groups by suffix, which
    # yields the entropy of the *preceding* character.
    for i in range(words_num):
        words_lst[i] = words_lst[i][::-1]
    words_lst = sorted(words_lst)
    words_lst.append('*' * nword)
    LstForCalEnt=[]
    for i in range(1, words_num + 1):
        if words_lst[i][:-1] == words_lst[i-1][:-1]:
            # Words are reversed here, so index back into dict2 with the
            # un-reversed key.
            if type(dict2[words_lst[i-1][::-1]]) == list:
                LstForCalEnt.append(dict2[words_lst[i-1][::-1]][0])
            else:
                LstForCalEnt.append(dict2[words_lst[i-1][::-1]])
        else:
            if type(dict2[words_lst[i-1][::-1]]) == list:
                LstForCalEnt.append(dict2[words_lst[i-1][::-1]][0])
            else:
                LstForCalEnt.append(dict2[words_lst[i-1][::-1]])
            ent = ent_cal(LstForCalEnt)
            info = dict1[words_lst[i-1][:-1][::-1]]
            if len(info) == 2:
                # No right-entropy was recorded in pass one; just append.
                info.append(ent)
                dict1[words_lst[i-1][:-1][::-1]] = info
            else:
                # Keep the smaller of left/right boundary entropy.
                if info[2] > ent:
                    info[2] = ent
                    dict1[words_lst[i-1][:-1][::-1]] = info
            LstForCalEnt = []
    # Any (n-1)-gram that never appeared as a prefix/suffix gets entropy 0.
    for word, info in dict1.items():
        if len(info) < 3:
            info.append(0)
            dict1[word] = info
def new_words_output(count_threshold_per, count_threshold,
                     bind_threshold_per, bind_threshold,
                     ent_threshold_per, ent_threshold,
                     dict, nword, output, debug):
    """Write candidate new words from *dict* to '<output>_<nword>'.

    A word is emitted when its count, cohesion and entropy all strictly
    exceed their thresholds. Each absolute threshold is used as-is when
    given (non-empty); otherwise it is derived from the corresponding
    percentile of the observed values.

    Args:
        *_threshold_per: percentile in (0, 1) used when the matching
            absolute threshold is '' (unset).
        *_threshold: absolute threshold, or '' to derive from percentile.
        dict: mapping word -> [count, bind, entropy]. (Parameter name
            shadows the builtin; kept for interface compatibility.)
        nword: label used in the output filename (e.g. 'word2').
        output: output filename prefix.
        debug: when True, also write '<output>_<nword>_debug' containing
            the raw scores of every candidate.
    """
    count_lst = sorted(info[0] for info in dict.values())
    bind_lst = sorted(info[1] for info in dict.values())
    ent_lst = sorted(info[2] for info in dict.values())
    # Derive any threshold that was not given explicitly. (The original
    # also contained no-op `x = x` assignments for the explicit case.)
    if count_threshold == '' and count_threshold_per:
        count_threshold = threshold_cal(count_lst, count_threshold_per)
    if bind_threshold == '' and bind_threshold_per:
        bind_threshold = threshold_cal(bind_lst, bind_threshold_per)
    if ent_threshold == '' and ent_threshold_per:
        ent_threshold = threshold_cal(ent_lst, ent_threshold_per)
    # Context manager / finally guarantee the files are closed even on
    # error (the original leaked both handles on exceptions).
    with open(output + '_' + nword, 'w', encoding='utf8') as fout:
        fdebug = (open(output + '_' + nword + '_debug', 'w', encoding='utf8')
                  if debug else None)
        try:
            for word, info in dict.items():
                if fdebug:
                    fdebug.write(word + ' ' + str(info[0])
                                 + ' ' + str(float('%.1f' % info[1]))
                                 + ' ' + str(float('%.3f' % info[2])) + '\n')
                if info[0] > count_threshold and info[1] > bind_threshold \
                        and info[2] > ent_threshold:
                    fout.write(word + '\n')
        finally:
            if fdebug:
                fdebug.close()
def main(argc, argv):
    """Command-line driver: read a corpus, mine 2/3/4-grams, emit new words.

    Args:
        argc: number of command-line arguments (len(argv)).
        argv: argument vector; argv[1] is the input corpus, argv[2] the
            output filename prefix, the rest are getopt-style options.
    """
    if argc < 3:
        usage()
        exit()
    file_in = argv[1]
    file_out = argv[2]
    # BUGFIX: parse the argv *parameter* rather than sys.argv directly,
    # so main() behaves correctly with a synthetic argument vector.
    opts, args = getopt.getopt(argv[3:], "hdc:C:b:B:e:E:")
    count_threshold_per = 0.5
    count_threshold = ''
    bind_threshold_per = 0.5
    bind_threshold = ''
    ent_threshold_per = 0.5
    ent_threshold = ''
    debug = False
    for opt, value in opts:
        if opt == '-c':
            count_threshold_per = float(value)
        elif opt == '-C':
            count_threshold = float(value)
        elif opt == '-b':
            bind_threshold_per = float(value)
        elif opt == '-B':
            bind_threshold = float(value)
        elif opt == '-e':
            ent_threshold_per = float(value)
        elif opt == '-E':
            ent_threshold = float(value)
        elif opt == '-d':
            debug = True
        elif opt == '-h':
            usage()
            exit()
    # n-gram count tables, indexed by n-gram length 1..5.
    count_dicts = {n: {} for n in range(1, 6)}
    TOTAL_WORD_NUM = 0
    # Stream the corpus instead of materializing it with readlines().
    with open(file_in, 'r', encoding='utf8') as fin:
        for sen in fin:
            sen = sen.strip()
            if not sen:
                continue
            # Split on non-word characters; each fragment is counted alone.
            for sub_sen in re.split(r"\W", sen):
                sub_sen_len = len(sub_sen)
                TOTAL_WORD_NUM += sub_sen_len
                # Equivalent to the original nested-if ladder (whose
                # `else: continue` branches were all dead code): count
                # every n-gram size the fragment is long enough for.
                for n in range(1, 6):
                    if sub_sen_len >= n:
                        update_count_dict(count_dicts[n], sub_sen, n)
    dict_w1 = count_dicts[1]
    dict_w2 = count_dicts[2]
    dict_w3 = count_dicts[3]
    dict_w4 = count_dicts[4]
    dict_w5 = count_dicts[5]
    if not (dict_w1 and dict_w2 and dict_w3 and dict_w4 and dict_w5):
        print('warning: too short txt or lines are too short, no new words output')
        exit()
    update_w2_bind_dict(dict_w1, dict_w2, TOTAL_WORD_NUM)
    update_w3_bind_dict(dict_w1, dict_w2, dict_w3, TOTAL_WORD_NUM)
    update_w4_bind_dict(dict_w1, dict_w2, dict_w3, dict_w4, TOTAL_WORD_NUM)
    update_ent_dict(dict_w2, dict_w3, 3)
    update_ent_dict(dict_w3, dict_w4, 4)
    update_ent_dict(dict_w4, dict_w5, 5)
    for ngram_dict, label in ((dict_w2, 'word2'),
                              (dict_w3, 'word3'),
                              (dict_w4, 'word4')):
        new_words_output(count_threshold_per, count_threshold,
                         bind_threshold_per, bind_threshold,
                         ent_threshold_per, ent_threshold,
                         ngram_dict, label, file_out, debug)


if __name__ == '__main__':
    main(len(sys.argv), sys.argv)
| |
# MIX-INS
# Specific objects inherit from these properties, in addition to inheriting from Object
# e.g. Dresser inherits from Openable, Container, and Object, while Lamp inherits from Lightable and Object
# Mix-ins define verbs that can be used on the objects, as well as required attributes
# (throws an error when creating an object if it does not define all required attributes)
# and changeable attributes (used in object serialisation).
class Openable(object):
    """Mix-in for objects that can be opened and closed.

    Required attributes: is_open, open_description, closed_description.
    pull is an alias for open; shut and push are aliases for close.
    """
    required_attrs = ['is_open', 'open_description', 'closed_description']
    changeable_attrs = ['is_open']

    def open(self, **kwargs):
        if not self.is_open:
            self.is_open = True
            return self.open_description
        return "That object is as open as it can get!"

    def pull(self, **kwargs):
        return self.open(**kwargs)

    def close(self, **kwargs):
        if not self.is_open:
            return "That object can't get any more closed."
        self.is_open = False
        return self.closed_description

    def shut(self, **kwargs):
        return self.close(**kwargs)

    def push(self, **kwargs):
        return self.close(**kwargs)
class Lightable(object):
    """Mix-in for objects that can be lit and snuffed.

    Required attributes: is_lit, on_description, off_description.
    turn_on/turn_off are aliases for light/snuff.
    """
    required_attrs = ['is_lit', 'on_description', 'off_description']
    changeable_attrs = ['is_lit']

    def light(self, **kwargs):
        if not self.is_lit:
            self.is_lit = True
            return self.on_description
        return "The object is already glowing brightly"

    def turn_on(self, **kwargs):
        return self.light(**kwargs)

    def snuff(self, **kwargs):
        if not self.is_lit:
            return "The object cannot get any darker."
        self.is_lit = False
        return "The glow fades into blackness."

    def turn_off(self, **kwargs):
        return self.snuff(**kwargs)
class UnreachableLight(Lightable):
    """A Lightable that can only be reached while standing on something.

    Required attributes: block, is_lit, error_description. The light is
    reachable only when some object in the room has a user standing on it.
    """
    required_attrs = ['block', 'is_lit', 'error_description']
    changeable_attrs = ['block', 'is_lit']

    def _is_standing(self, room):
        # Reachable iff any object in the room reports has_user truthy;
        # objects without the attribute are ignored.
        self.block = True
        for obj in room.objects:
            if getattr(obj, 'has_user', False):
                self.block = False

    def _reachable(self, location):
        # Shared guard for light/snuff: validate location, refresh block.
        if not location:
            raise Exception('location must be provided')
        self._is_standing(location)
        return not self.block

    def light(self, location=None, **kwargs):
        if self._reachable(location):
            return super(UnreachableLight, self).light()
        return self.error_description

    def turn_on(self, *args, **kwargs):
        return self.light(*args, **kwargs)

    def snuff(self, location=None, **kwargs):
        if self._reachable(location):
            return super(UnreachableLight, self).snuff()
        return self.error_description

    def turn_off(self, *args, **kwargs):
        return self.snuff(*args, **kwargs)
class Gettable(object):
    """Mix-in for objects that can be picked up and dropped.

    `location` may be an actual room or a Container-like object.
    """
    required_attrs = []
    changeable_attrs = []

    # room can be an actual room or a Container object
    def get(self, location=None, inventory=None, **kwargs):
        """Move this object from *location* (or an open container inside
        it) into *inventory*.

        Returns None on success, otherwise a player-facing error string.
        Raises Exception when location or inventory is not supplied.
        """
        if not location:
            raise Exception('location must be provided')
        if inventory is None:
            raise Exception('inventory must be provided')
        # First look inside container-like objects sitting in the location.
        for obj in location.objects:
            try:
                if self in obj.objects:
                    if obj.is_open:
                        inventory.append(self)
                        obj.objects.remove(self)
                        # BUGFIX: return on success. Previously execution
                        # fell through and reported "You already have that
                        # object." even though the take had just succeeded.
                        return
                    else:
                        return "The object is closed."
            except AttributeError:
                # Not a container (no .objects / .is_open); skip it.
                pass
        if self in location.objects:
            inventory.append(self)
            location.objects.remove(self)
        elif self in inventory:
            return "You already have that object."
        else:
            return "That object does not exist."

    def pickup(self, **kwargs):
        return self.get(**kwargs)

    def drop(self, location=None, inventory=None, **kwargs):
        """Move this object from *inventory* into *location*."""
        if not location:
            raise Exception('location must be provided')
        if inventory is None:
            raise Exception('inventory must be provided')
        if self not in inventory:
            return "That item is not currently in your inventory."
        location.objects.append(self)
        inventory.remove(self)
class Climbable(object):
    """Mix-in for objects a player can stand on.

    Required attribute: has_user (True while someone is on the object).
    climb/stand toggle the state; get_on, get_off and get_down are
    single-purpose variants.
    """
    required_attrs = ['has_user']
    changeable_attrs = ['has_user']

    def climb(self, inventory=None, **kwargs):
        if inventory is None:
            raise Exception('inventory must be provided')
        if self in inventory:
            return "You cannot climb that while still holding it."
        # Toggle and report the new state.
        self.has_user = not self.has_user
        if self.has_user:
            return "You clamber onto the object."
        return "You step carefully back down."

    def stand(self, **kwargs):
        return self.climb(**kwargs)

    def get_on(self, inventory=None, **kwargs):
        if inventory is None:
            raise Exception('inventory must be provided')
        if self in inventory:
            return "You cannot climb that while still holding it."
        if self.has_user:
            return "You are already standing on that object!"
        self.has_user = True
        return "You clamber onto the object."

    def get_off(self, inventory=None, **kwargs):
        if inventory is None:
            raise Exception('inventory must be provided')
        if not self.has_user:
            return "You are not standing on anything."
        self.has_user = False
        return "You step carefully back down."

    def get_down(self, **kwargs):
        return self.get_off(**kwargs)
class Container(object):
    """Mix-in for objects that can hold other objects.

    Required attributes: is_open, objects. Retrieving an item *from* a
    container is handled by Gettable.get on the contained object.
    """
    required_attrs = ['is_open', 'objects']
    changeable_attrs = ['is_open', 'objects']

    def put_in(self, obj, inventory=None, **kwargs):
        if inventory is None:
            raise Exception('inventory must be provided')
        if obj not in inventory:
            raise Exception('you must be holding that object')
        if not self.is_open:
            return "Try opening the container first."
        self.objects.append(obj)
        inventory.remove(obj)
        return "You place the object inside the open container."

    def look_in(self, **kwargs):
        if not self.is_open:
            return "You cannot look inside a closed object."
        names = [obj.name for obj in self.objects]
        if len(names) > 1:
            template = ("This object contains: "
                        + "{}, " * (len(names) - 1) + "{}.")
            return template.format(*names)
        if names:
            return "This object has a {} inside.".format(names[0])
        return "This object is empty."
| |
"""
Instance model for atmosphere.
"""
from hashlib import md5
from datetime import datetime, timedelta
from django.db import (
models, transaction, DatabaseError
)
from django.db.models import (
Q, ObjectDoesNotExist
)
from django.utils import timezone
import pytz
from rtwo.models.machine import MockMachine
from rtwo.models.size import MockSize
from threepio import logger
from core.models.identity import Identity
from core.models.instance_source import InstanceSource
from core.models.machine import (
convert_esh_machine, get_or_create_provider_machine
)
from core.models.volume import convert_esh_volume
from core.models.size import (
convert_esh_size, Size
)
from core.models.tag import Tag
from core.models.managers import ActiveInstancesManager
from atmosphere import settings
class Instance(models.Model):
    """
    When a user launches a machine, an Instance is created.
    Instances are described by their Name and associated Tags
    Instances have a specific ID of the machine or volume
    they were created from (source)
    Instances have a specific ID provided by the cloud provider (provider_alias)
    The IP Address, creation and termination date,
    and the user who launched the instance are recorded for logging purposes.
    """
    # Transient holder for the cloud-side (esh) representation of this
    # instance; a plain class attribute, never persisted to the database.
    esh = None
    name = models.CharField(max_length=256)
    # TODO: CreateUUIDfield that is *not* provider_alias?
    # token is used to help instance 'phone home' to server post-deployment.
    token = models.CharField(max_length=36, blank=True, null=True)
    tags = models.ManyToManyField(Tag, blank=True)
    # The specific machine & provider for which this instance exists
    source = models.ForeignKey(InstanceSource, related_name='instances')
    # Cloud-provider-assigned identifier; unique across all instances.
    provider_alias = models.CharField(max_length=256, unique=True)
    ip_address = models.GenericIPAddressField(null=True, unpack_ipv4=True)
    created_by = models.ForeignKey('AtmosphereUser')
    # FIXME: Why is null=True okay here?
    created_by_identity = models.ForeignKey(Identity, null=True)
    # Feature flags for in-browser access methods (shell / VNC / desktop).
    shell = models.BooleanField(default=False)
    vnc = models.BooleanField(default=False)
    web_desktop = models.BooleanField(default=False)
    password = models.CharField(max_length=64, blank=True, null=True)
    # FIXME Problems when setting a default, missing auto_now_add
    start_date = models.DateTimeField()
    end_date = models.DateTimeField(null=True, blank=True)
    # Model Managers
    objects = models.Manager()  # The default manager.
    # Manager limited to instances still considered active.
    active_instances = ActiveInstancesManager()
@property
def project_name(self):
if not self.created_by_identity:
return None
return self.created_by_identity.get_credential('ex_project_name')
    @property
    def provider(self):
        """Provider that this instance's source (machine/volume) lives on."""
        return self.source.provider
@classmethod
def for_user(self, user):
identity_ids = user.current_identities.values_list('id', flat=True)
qs = Instance.objects.filter(created_by_identity__in=identity_ids)
return qs
def get_total_hours(self):
from service.monitoring import _get_allocation_result
identity = self.created_by_identity
limit_instances = [self.provider_alias]
result = _get_allocation_result(
identity,
limit_instances=limit_instances)
total_hours = result.total_runtime().total_seconds() / 3600.0
hours = round(total_hours, 2)
return hours
def get_projects(self, user):
# TODO: Replace with 'only_current'
projects = self.projects.filter(
Q(end_date=None) | Q(end_date__gt=timezone.now()),
owner=user,
)
return projects
def get_first_history(self):
"""
Returns the first InstanceStatusHistory
"""
# TODO: Profile Option
# except InstanceStatusHistory.DoesNotExist:
# TODO: Profile current choice
try:
return self.instancestatushistory_set.order_by(
'start_date').first()
except ObjectDoesNotExist:
return None
    def get_last_history(self):
        """
        Returns the newest InstanceStatusHistory
        """
        # FIXME: Clean up this implementation OR rename to `get_or_create`
        # TODO: Profile Option
        # except InstanceStatusHistory.DoesNotExist:
        # TODO: Profile current choice
        # FIXME: Move this call so that it happens inside InstanceStatusHistory to avoid circ.dep.
        last_history = self.instancestatushistory_set.order_by(
            '-start_date').first()
        if last_history:
            return last_history
        else:
            # No history rows exist yet: synthesize one using a placeholder
            # "Unknown" size so callers always receive a history object.
            unknown_size, _ = Size.objects.get_or_create(
                name='Unknown Size', alias='N/A', provider=self.provider,
                cpu=-1, mem=-1, root=-1, disk=-1)
            last_history = self._build_first_history(
                'Unknown', unknown_size, self.start_date, self.end_date, True)
            logger.warn("No history existed for %s until now. "
                        "An 'Unknown' history was created" % self)
            return last_history
def _build_first_history(self, status_name, size,
start_date, end_date=None, first_update=False, activity=None):
# FIXME: Move this call so that it happens inside InstanceStatusHistory to avoid circ.dep.
from core.models import InstanceStatusHistory
if not first_update and status_name not in [
'build',
'pending',
'running']:
logger.info("First Update Unknown - Status name on instance \
%s: %s - %s"
% (self.provider_alias, status_name))
# Instance state is 'unknown' from start of instance until now
# NOTE: This is needed to prevent over-charging accounts
status_name = 'unknown'
activity = None
first_history = InstanceStatusHistory.create_history(
status_name, self, size, start_date=start_date, end_date=end_date, activity=activity)
first_history.save()
return first_history
    def update_history(
            self,
            status_name,
            size,
            task=None,
            tmp_status=None,
            first_update=False):
        """
        Given the status name and size, look up the previous history object
        If nothing has changed: return (False, last_history)
        else: end date previous history object, start new history object.
        return (True, new_history)
        """
        # FIXME: Move this call so that it happens inside InstanceStatusHistory to avoid circ.dep.
        from core.models import InstanceStatusHistory
        import traceback
        # 1. Get status name
        # NOTE(review): relies on module-level _get_status_name_for_provider
        # and on self.provider_machine / self.esh_activity / self.esh_status,
        # all defined elsewhere in this module -- confirm before refactoring.
        status_name = _get_status_name_for_provider(
            self.provider_machine.provider,
            status_name,
            task,
            tmp_status)
        activity = self.esh_activity()
        # 2. Get the last history (or Build a new one if no other exists)
        last_history = self.get_last_history()
        if not last_history:
            last_history = InstanceStatusHistory.create_history(
                status_name, self, size, start_date=self.start_date, activity=activity)
            last_history.save()
            logger.debug("STATUSUPDATE - FIRST - Instance:%s Old Status: %s - %s New\
Status: %s Tmp Status: %s" % (self.provider_alias,
                              self.esh_status(),
                              self.esh_activity(),
                              status_name,
                              tmp_status))
            logger.debug("STATUSUPDATE - Traceback: %s"
                         % traceback.format_stack())
        # 2. Size and name must match to continue using last history
        if last_history.status.name == status_name \
                and last_history.size.id == size.id:
            # logger.info("status_name matches last history:%s " %
            # last_history.status.name)
            return (False, last_history)
        logger.debug("STATUSUPDATE - Instance:%s Old Status: %s - %s New Status: %s\
Tmp Status: %s" % (self.provider_alias,
                   self.esh_status(),
                   self.esh_activity(),
                   status_name,
                   tmp_status))
        logger.debug("STATUSUPDATE - Traceback: %s" % traceback.format_stack())
        # 3. ASSERT: A new history item is required due to a State or Size
        # Change
        now_time = timezone.now()
        try:
            # InstanceStatusHistory.transaction end-dates the previous
            # history row and opens the new one atomically.
            new_history = InstanceStatusHistory.transaction(
                status_name, activity, self, size,
                start_time=now_time,
                last_history=last_history)
            return (True, new_history)
        except ValueError:
            logger.exception("Bad transaction")
            return (False, last_history)
def _calculate_active_time(self, delta=None):
if not delta:
# Default delta == Time since instance created.
delta = timezone.now() - self.start_date
past_time = timezone.now() - delta
recent_history = self.instancestatushistory_set.filter(
Q(end_date=None) | Q(end_date__gt=past_time)
).order_by('start_date')
total_time = timedelta()
inst_prefix = "HISTORY,%s,%s" % (self.created_by.username,
self.provider_alias[:5])
for idx, state in enumerate(recent_history):
# Can't start counting any earlier than 'delta'
if state.start_date < past_time:
start_count = past_time
else:
start_count = state.start_date
# If date is current, stop counting 'right now'
if not state.end_date:
final_count = timezone.now()
else:
final_count = state.end_date
if state.is_active():
# Active time is easy
active_time = final_count - start_count
else:
# Inactive states are NOT counted against the user
active_time = timedelta()
# multiply by CPU count of size.
cpu_time = active_time * state.size.cpu
logger.debug("%s,%s,%s,%s CPU,%s,%s,%s,%s"
% (inst_prefix, state.status.name,
state.size.name, state.size.cpu,
strfdate(start_count), strfdate(final_count),
strfdelta(active_time), strfdelta(cpu_time)))
total_time += cpu_time
return total_time
def get_active_hours(self, delta):
# Don't move it up. Circular reference.
from service.monitoring import delta_to_hours
total_time = self._calculate_active_time(delta)
return delta_to_hours(total_time)
def get_active_time(self, earliest_time=None, latest_time=None):
"""
Return active time, and the reference list that was counted.
"""
accounting_list = self._accounting_list(earliest_time, latest_time)
total_time = timedelta()
for state in accounting_list:
total_time += state.cpu_time
return total_time, accounting_list
def recent_history(self, earliest_time, latest_time):
"""
Return all Instance Status History
Currently Running
OR
Terminated after: now() - delta (ex:7 days, 1 month, etc.)
"""
active_history = self.instancestatushistory_set.filter(
# Collect history that is Current or has 'countable' time..
Q(end_date=None) | Q(end_date__gt=earliest_time)
).order_by('start_date')
return active_history
def _accounting_list(self, earliest_time=None, latest_time=None):
"""
Return the list of InstanceStatusHistory that should be counted,
according to the limits of 'earliest_time' and 'latest_time'
"""
if not latest_time:
latest_time = timezone.now()
# Determine the earliest time to start counts.
if not earliest_time:
earliest_time = self.start_date
accounting_list = []
active_history = self.recent_history(earliest_time, latest_time)
for state in active_history:
(active_time, start_count, end_count) = state.get_active_time(
earliest_time, latest_time)
state.active_time = active_time
state.start_count = start_count
state.end_count = end_count
state.cpu_time = active_time * state.size.cpu
accounting_list.append(state)
return accounting_list
def end_date_all(self, end_date=None):
"""
Call this function to tie up loose ends when the instance is finished
(Destroyed, terminated, no longer exists..)
"""
if not end_date:
end_date = timezone.now()
ish_list = self.instancestatushistory_set.filter(end_date=None)
for ish in ish_list:
# logger.info('Saving history:%s' % ish)
if not ish.end_date:
logger.info("END DATING instance history %s: %s" % (ish, end_date))
ish.end_date = end_date
ish.save()
if not self.end_date:
logger.info("END DATING instance %s: %s" % (self.provider_alias, end_date))
self.end_date = end_date
self.save()
def creator_name(self):
if not self.created_by:
return "N/A"
return self.created_by.username
def hash_alias(self):
return md5(self.provider_alias).hexdigest()
def hash_machine_alias(self):
if self.esh and self.esh._node\
and self.esh._node.extra\
and self.esh._node.extra.get('imageId'):
return md5(self.esh._node.extra['imageId']).hexdigest()
else:
try:
if self.source:
return md5(self.source.identifier).hexdigest()
except InstanceSource.DoesNotExist:
logger.exception(
"Unable to find provider_machine for %s." %
self.provider_alias)
return 'Unknown'
def esh_fault(self):
if self.esh:
return self.esh.extra.get('fault', {})
return {}
def esh_status(self):
if self.esh:
return self.esh.get_status()
last_history = self.get_last_history()
if last_history:
return last_history.status.name
else:
return "Unknown"
def esh_activity(self):
activity = None
if self.esh:
try:
activity = " ".join(self.esh.get_status().split(' - ')[1:]).strip()
except:
activity = None
return activity
last_history = self.get_last_history()
if last_history:
try:
activity = " ".join(last_history.status.split(' - ')[1:]).strip()
except:
activity = None
return activity
else:
return "Unknown"
def get_size(self):
return self.get_last_history().size
def esh_size(self):
if not self.esh or not hasattr(self.esh, 'extra'):
last_history = self.get_last_history()
if last_history:
return last_history.size.alias
return "Unknown"
extras = self.esh.extra
if 'flavorId' in extras:
return extras['flavorId']
elif 'instance_type' in extras:
return extras['instance_type']
elif 'instancetype' in extras:
return extras['instancetype']
else:
return "Unknown"
def application_uuid(self):
if self.source.is_machine():
return self.source.providermachine\
.application_version.application.uuid
else:
return None
def application_name(self):
if self.source.is_machine():
return self.source.providermachine\
.application_version.application.name
else:
return None
def application_id(self):
if self.source.is_machine():
return self.source.providermachine\
.application_version.application.id
else:
return None
@property
def volume(self):
if self.source.is_volume():
return self.source.volume
return None
@property
def provider_machine(self):
if self.source.is_machine():
return self.source.providermachine
return None
def esh_source_name(self):
if self.source.is_machine():
return self.source.providermachine\
.application_version.application.name
elif self.source.is_volume():
return self.source.volume.name
else:
return "%s - Source Unknown" % self.source.identifier
def provider_uuid(self):
return self.source.provider.uuid
def provider_name(self):
return self.source.provider.location
def esh_source(self):
return self.source.identifier
@property
def allocation_source(self):
# FIXME: look up the current allocation source by "Scanning the event table" on this instance.
from core.models.allocation_source import \
InstanceAllocationSourceSnapshot as Snapshot
snapshot = Snapshot.objects.filter(instance=self).first()
return snapshot.allocation_source if snapshot else None
def change_allocation_source(self, allocation_source, user=None):
"""
Call this method when you want to issue a 'change_allocation_source' event to the database.
"""
if not settings.USE_ALLOCATION_SOURCE:
return
from core.models.event_table import EventTable
if not user:
user = self.created_by
# FIXME: comment out this line for AllocationSource
if not allocation_source:
raise Exception("Allocation source must not be null")
payload = {
'allocation_source_id': allocation_source.source_id,
'instance_id': self.provider_alias
}
return EventTable.create_event(
"instance_allocation_source_changed",
payload,
user.username)
def json(self):
return {
'alias': self.provider_alias,
'name': self.name,
'tags': [tag.json() for tag in self.tags.all()],
'ip_address': self.ip_address,
'provider_machine': self.provider_machine.json(),
'created_by': self.created_by.username,
}
def __unicode__(self):
return "%s (Name:%s, Creator:%s, IP:%s)" %\
(self.provider_alias, self.name,
self.creator_name(), self.ip_address)
class Meta:
db_table = "instance"
app_label = "core"
"""
Useful utility methods for the Core Model..
"""
OPENSTACK_TASK_STATUS_MAP = {
# Terminate tasks
# Suspend tasks
'resuming': 'build',
'suspending': 'suspended',
# Shutdown tasks
'powering-on': 'build',
'shutting-down': 'suspended',
# Instance launch tasks
'initializing': 'build',
'scheduling': 'build',
'spawning': 'build',
# Atmosphere Task-specific lines
'networking': 'networking',
'deploying': 'deploying',
'running_boot_script': 'deploying',
'deploy_error': 'deploy_error',
'boot_script_error': 'deploy_error',
}
OPENSTACK_ACTIVE_STATES = ['active']
OPENSTACK_INACTIVE_STATES = ['build', 'suspended', 'shutoff', 'Unknown']
def _get_status_name_for_provider(
provider,
status_name,
task_name=None,
tmp_status=None):
"""
Purpose: to be used in lookups/saves
Return the appropriate InstanceStatus
"""
provider_type = provider.get_type_name().lower()
if provider_type == 'openstack':
return _get_openstack_name_map(status_name, task_name, tmp_status)
logger.warn(
"Could not find a strategy for provider type:%s" %
provider_type)
return status_name
def _get_openstack_name_map(status_name, task_name, tmp_status):
new_status = None
if task_name:
new_status = OPENSTACK_TASK_STATUS_MAP.get(task_name)
if new_status:
logger.debug("Task provided:%s, Status maps to %s"
% (task_name, new_status))
elif tmp_status:
# ASSERT: task_name = None
if 'running_boot_script' in tmp_status:
tmp_status = 'running_boot_script' # Avoid problems due to keeping track of scripts executed 1/2, 2/3, etc.
new_status = OPENSTACK_TASK_STATUS_MAP.get(tmp_status)
logger.debug(
"Tmp_status provided:%s, Status maps to %s" %
(tmp_status, new_status))
if not new_status:
# ASSERT: tmp_status = None
return status_name
# ASSERT: new_status exists.
# Determine precedence/override based on status_name.
if status_name in OPENSTACK_ACTIVE_STATES:
return new_status
else:
# This covers cases like 'shutoff - deploy_error' being marked as
# 'shutoff'
return status_name
def strfdelta(tdelta, fmt=None):
from string import Formatter
if not fmt:
# The standard, most human readable format.
fmt = "{D} days {H:02} hours {M:02} minutes {S:02} seconds"
if tdelta == timedelta():
return "0 minutes"
formatter = Formatter()
return_map = {}
div_by_map = {'D': 86400, 'H': 3600, 'M': 60, 'S': 1}
keys = map(lambda x: x[1], list(formatter.parse(fmt)))
remainder = int(tdelta.total_seconds())
for unit in ('D', 'H', 'M', 'S'):
if unit in keys and unit in div_by_map.keys():
return_map[unit], remainder = divmod(remainder, div_by_map[unit])
return formatter.format(fmt, **return_map)
def strfdate(datetime_o, fmt=None):
if not fmt:
# The standard, most human readable format.
fmt = "%m/%d/%Y %H:%M:%S"
if not datetime_o:
datetime_o = timezone.now()
return datetime_o.strftime(fmt)
def find_instance(instance_id):
if type(instance_id) == int:
core_instance = Instance.objects.filter(id=instance_id)
else:
core_instance = Instance.objects.filter(provider_alias=instance_id)
if len(core_instance) > 1:
logger.warn(
"Multiple instances returned for instance_id - %s" %
instance_id)
if core_instance:
return core_instance[0]
return None
def _find_esh_ip(esh_instance):
if esh_instance.ip:
return esh_instance.ip
try:
if not hasattr(esh_instance, "extra")\
or not esh_instance.extra.get("addresses"):
return "0.0.0.0"
ips = esh_instance.extra["addresses"].values()
ip_address = [ip for ip in ips[0]
if ip["OS-EXT-IPS:type"] == "floating"][0]["addr"]
except Exception: # no public ip
try:
ip_address = [ip for ip in ips[0]
if ip["OS-EXT-IPS:type"] == "fixed"][0]["addr"]
except Exception: # no private ip
ip_address = "0.0.0.0"
return ip_address
def _update_core_instance(core_instance, ip_address, password):
core_instance.ip_address = ip_address
if password:
core_instance.password = password
if core_instance.end_date:
logger.warn("ERROR - Instance %s prematurley 'end-dated'."
% core_instance.provider_alias)
core_instance.end_date = None
core_instance.save()
def _find_esh_start_date(esh_instance):
if 'launchdatetime' in esh_instance.extra:
create_stamp = esh_instance.extra.get('launchdatetime')
elif 'launch_time' in esh_instance.extra:
create_stamp = esh_instance.extra.get('launch_time')
elif 'created' in esh_instance.extra:
create_stamp = esh_instance.extra.get('created')
else:
raise Exception(
"Instance does not have a created timestamp. This"
"should never happen. Don't cheat and assume it was created just "
"now. Get the real launch time, bra.")
start_date = _convert_timestamp(create_stamp)
logger.debug("Launched At: %s" % create_stamp)
logger.debug("Started At: %s" % start_date)
return start_date
def _convert_timestamp(iso_8601_stamp):
if not iso_8601_stamp:
return None
try:
datetime_obj = datetime.strptime(
iso_8601_stamp,
'%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
try:
datetime_obj = datetime.strptime(
iso_8601_stamp,
'%Y-%m-%dT%H:%M:%SZ')
except ValueError:
raise ValueError(
"Expected ISO8601 Timestamp in Format: "
"YYYY-MM-DDTHH:MM:SS[.ssss][Z]")
# All Dates are UTC relative
datetime_obj = datetime_obj.replace(tzinfo=pytz.utc)
return datetime_obj
def convert_instance_source(
esh_driver,
esh_instance,
esh_source,
provider_uuid,
identity_uuid,
user):
"""
Given the instance source, create the appropriate core REPR and return
"""
from rtwo.models.volume import BaseVolume
from rtwo.models.machine import BaseMachine
# TODO: Future Release..
new_source = None
if isinstance(esh_source, BaseVolume):
core_source = convert_esh_volume(
esh_source,
provider_uuid,
identity_uuid,
user)
elif isinstance(esh_source, BaseMachine):
if isinstance(esh_source, MockMachine):
# MockMachine includes only the Alias/ID information
# so a lookup on the machine is required to get accurate
# information.
new_source = esh_driver.get_machine(esh_source.id)
if not new_source:
core_source = get_or_create_provider_machine(
esh_source.id,
"Inactive Machine for Instance %s" %
esh_instance.id,
provider_uuid)
else:
core_source = convert_esh_machine(esh_driver, new_source,
provider_uuid, user)
elif not isinstance(esh_source, BaseMachine):
raise Exception("Encountered unknown source %s" % esh_source)
return core_source
def convert_esh_instance(
esh_driver,
esh_instance,
provider_uuid,
identity_uuid,
user,
token=None,
password=None):
"""
"""
instance_id = esh_instance.id
ip_address = _find_esh_ip(esh_instance)
source_obj = esh_instance.source
core_instance = find_instance(instance_id)
if core_instance:
_update_core_instance(core_instance, ip_address, password)
else:
start_date = _find_esh_start_date(esh_instance)
logger.debug("Instance: %s" % instance_id)
core_source = convert_instance_source(
esh_driver,
esh_instance,
source_obj,
provider_uuid,
identity_uuid,
user)
logger.debug("CoreSource: %s" % core_source)
# Use New/Existing core Machine to create core Instance
core_instance = create_instance(
provider_uuid,
identity_uuid,
instance_id,
core_source.instance_source,
ip_address,
esh_instance.name,
user,
start_date,
token,
password)
# Add 'esh' object
core_instance.esh = esh_instance
# Update the InstanceStatusHistory
core_size = _esh_instance_size_to_core(esh_driver,
esh_instance, provider_uuid)
# TODO: You are the mole!
core_instance.update_history(
esh_instance.extra['status'],
core_size,
esh_instance.extra.get('task'),
esh_instance.extra.get('metadata', {}).get('tmp_status', "MISSING"))
# Update values in core with those found in metadata.
# core_instance = set_instance_from_metadata(esh_driver, core_instance)
return core_instance
def _esh_instance_size_to_core(esh_driver, esh_instance, provider_uuid):
# NOTE: Querying for esh_size because esh_instance
# Only holds the alias, not all the values.
# As a bonus this is a cached-call
esh_size = esh_instance.size
if isinstance(esh_size, MockSize):
# MockSize includes only the Alias/ID information
# so a lookup on the size is required to get accurate
# information.
# TODO: Switch to 'get_cached_size!'
new_size = esh_driver.get_size(esh_size.id)
if new_size:
esh_size = new_size
core_size = convert_esh_size(esh_size, provider_uuid)
return core_size
def set_instance_from_metadata(esh_driver, core_instance):
"""
NOT BEING USED ANYMORE.. DEPRECATED..
"""
# Fixes Dep. loop - Do not remove
from api.serializers import InstanceSerializer
# Breakout for drivers (Eucalyptus) that don't support metadata
if not hasattr(esh_driver._connection, 'ex_get_metadata'):
# logger.debug("EshDriver %s does not have function 'ex_get_metadata'"
# % esh_driver._connection.__class__)
return core_instance
try:
esh_instance = esh_driver.get_instance(core_instance.provider_alias)
if not esh_instance:
return core_instance
metadata = esh_driver._connection.ex_get_metadata(esh_instance)
except Exception:
logger.exception("Exception retrieving instance metadata for %s" %
core_instance.provider_alias)
return core_instance
# TODO: Match with actual instance launch metadata in service/instance.py
# TODO: Probably best to redefine serializer as InstanceMetadataSerializer
# TODO: Define a creator and their identity by the METADATA instead of
# assuming its the person who 'found' the instance
serializer = InstanceSerializer(core_instance, data=metadata,
partial=True)
if not serializer.is_valid():
logger.warn("Encountered errors serializing metadata:%s"
% serializer.errors)
return core_instance
core_instance = serializer.save()
core_instance.esh = esh_instance
return core_instance
def create_instance(
provider_uuid,
identity_uuid,
provider_alias,
instance_source,
ip_address,
name,
creator,
create_stamp,
token=None,
password=None):
# TODO: Define a creator and their identity by the METADATA instead of
# assuming its the person who 'found' the instance
identity = Identity.objects.get(uuid=identity_uuid)
new_inst = Instance.objects.create(name=name,
provider_alias=provider_alias,
source=instance_source,
ip_address=ip_address,
created_by=creator,
created_by_identity=identity,
token=token,
password=password,
shell=False,
start_date=create_stamp)
new_inst.save()
if token:
logger.debug("New instance created - %s<%s> (Token = %s)" %
(name, provider_alias, token))
else:
logger.debug("New instance object - %s<%s>" %
(name, provider_alias,))
# NOTE: No instance_status_history here, because status is not passed
return new_inst
| |
# (C) William W. Cohen and Carnegie Mellon University, 2016
#
# learning methods for Tensorlog
#
import sys
import time
import math
import numpy as NP
import scipy.sparse as SS
import collections
from tensorlog import config
from tensorlog import dataset
from tensorlog import declare
from tensorlog import funs
from tensorlog import mutil
from tensorlog import opfunutil
# clip to avoid exploding gradients
conf = config.Config()
conf.minGradient = -100; conf.help.minGradient = "Clip gradients smaller than this to minGradient"
conf.maxGradient = +100; conf.help.minGradient = "Clip gradients larger than this to maxGradient"
##############################################################################
# helper classes
##############################################################################
class GradAccumulator(object):
"""Accumulate the sum gradients for perhaps many parameters, indexing
them by parameter name. Also maintains 'counter' statistics,
which are simply floats indexed by a counter name. Counters are
mostly updated by the Tracer functions. The only required counter
is the counter 'n', which is the size of the minibatch the
gradient was computed on.
"""
def __init__(self):
self.runningSum = {}
self.counter = collections.defaultdict(float)
self.reshaped = False
def keys(self):
return list(self.runningSum.keys())
def items(self):
return list(self.runningSum.items())
def __getitem__(self,paramName):
return self.runningSum[paramName]
def __setitem__(self,paramName,gradient):
self.runningSum[paramName] = gradient
def accum(self,paramName,deltaGradient):
"""Increment the parameter with the given name by the appropriate
amount."""
mutil.checkCSR(deltaGradient,('deltaGradient for %s' % str(paramName)))
if not paramName in self.runningSum:
self.runningSum[paramName] = deltaGradient
else:
self.runningSum[paramName] = self.runningSum[paramName] + deltaGradient
mutil.checkCSR(self.runningSum[paramName],('runningSum for %s' % str(paramName)))
#
# manipulate gradients
#
def fitParameterShapes(self):
"""Fix up the running sums so that they are the same shapes as the
parameters they are gradients of (in-place). This is
necessary because in a minibatch of size m row-vector
gradients are stored for each example, so they will have m
rows instead of just one.
"""
if not self.reshaped:
for ((functor,arity),mat) in list(self.items()):
if arity==1:
#for a parameter that is a row-vector, we have one
#gradient per example, so replace it with the mean
self.runningSum[(functor,arity)] = mutil.mean(mat)
else:
# for parameters that are matrices, we have one gradient
# of the right shape, but it is the sum of the gradients
# of the examples in the minibatch
self.runningSum[(functor,arity)] = mat * (1.0/self.counter['n'])
self.reshaped = True
#TODO only used by adagrad, is this the right place for this?
def mapData(self,mapFun):
"""Apply some function to every gradient in the accumulator (in place)."""
result = GradAccumulator()
for k,m in list(self.items()):
result.accum(k, mutil.mapData(mapFun,m))
return result
#TODO only used by adagrad, is this the right place?
def addedTo(self,other):
"""Return a new GradAccumulator with the sum of the gradient,
discarding counters.
"""
result = GradAccumulator()
for k,m in list(self.items()):
result.accum(k, m)
for k,m in list(other.items()):
result.accum(k, m)
return result
#
# helper routines for handling counters and such
#
@staticmethod
def counter():
""" Return a new counter object. """
return collections.defaultdict(float)
@staticmethod
def mergeCounters(counters):
"""Compute the min, max, total, avg, and weighted average of every
counter, and return in a new defaultdict
"""
return GradAccumulator._mergeCountersWithInit(counters,None)
@staticmethod
def accumToCounter(counter,otherCounter):
"""Update counter, which keeps a running run of the min, max, total,
avg, of all otherCounters that have been merged into it.
"""
return GradAccumulator._mergeCountersWithInit([otherCounter],counter)
@staticmethod
def _mergeCountersWithInit(counters,init):
ctr = init if init!=None else GradAccumulator.counter()
keys = set()
weightedTotalPrefix = '_wtot' #temp storage for weighted averages
#reduce with total,min,max, and weighted total
for counter in counters:
ctr['counters'] += 1 # merged counters
for k,v in list(counter.items()):
keys.add(k)
ctr[(k,'tot')] += v
ctr[(k,weightedTotalPrefix)] += counter['n']*v
kmin = (k,'min')
ctr[kmin] = min(ctr.get(kmin,sys.float_info.max),v)
kmax = (k,'max')
ctr[kmax] = max(ctr.get(kmax,sys.float_info.min),v)
# convert weighted total to weighted avg
totn = ctr[('n','tot')]
for k in keys:
ctr[(k,'avg')] += ctr[(k,weightedTotalPrefix)]/totn
del ctr[(k,weightedTotalPrefix)]
return ctr
class Tracer(object):
""" Functions to pass in as arguments to a learner's "tracer"
keyword argument. These are called by the optimizer after
gradient computation for each mode - at this point Y and P are
known.
"""
@staticmethod
def silent(learner,gradAccum,Y,P,**kw):
"""No output."""
gradAccum.counter['n'] = mutil.numRows(Y)
pass
@staticmethod
def cheap(learner,gradAccum,Y,P,**kw):
"""Easy-to-compute status message."""
gradAccum.counter['n'] = mutil.numRows(Y)
Tracer._announce(gradAccum,
Tracer.identification(learner,kw)
+ Tracer.timing(learner,kw))
@staticmethod
def default(learner,gradAccum,Y,P,**kw):
"""A default status message."""
gradAccum.counter['n'] = mutil.numRows(Y)
Tracer._announce(gradAccum,
Tracer.identification(learner,kw)
+ Tracer.loss(learner,Y,P,kw)
+ Tracer.timing(learner,kw))
@staticmethod
def recordDefaults(learner,gradAccum,Y,P,**kw):
"""A default status message."""
gradAccum.counter['n'] = mutil.numRows(Y)
Tracer._record(gradAccum,
Tracer.identification(learner,kw)
+ Tracer.loss(learner,Y,P,kw)
+ Tracer.timing(learner,kw))
@staticmethod
def defaultPlusAcc(learner,gradAccum,Y,P,**kw):
"""A default status message."""
gradAccum.counter['n'] = mutil.numRows(Y)
Tracer._announce(gradAccum,
Tracer.identification(learner,kw)
+ Tracer.loss(learner,Y,P,kw)
+ Tracer.accuracy(learner,Y,P,kw)
+ Tracer.timing(learner,kw))
@staticmethod
def _announce(gradAccum,keyValuePairList):
""" Print info in a list of key value pairs,
and also store them in the gradAccum's counters.
"""
pairs = Tracer._record(gradAccum,keyValuePairList)
print((' '.join(pairs)))
@staticmethod
def _record(gradAccum,keyValuePairList):
"""Prepare a printable list of key value pairs, and also store them
in the gradAccum's counters.
"""
pairs = []
for (k,v) in keyValuePairList:
gradAccum.counter[k] = v
pairs.append(k)
pairs.append('%g' % v)
return pairs
print((' '.join(pairs)))
#
# return lists of key,value pairs that can be used in a status
# message or counters, possibly making use of information from the
# keywords
#
@staticmethod
def loss(learner,Y,P,kw):
#perExample=False since we care about the sum xe+reg which is being optimized
xe = learner.crossEntropy(Y,P,perExample=False)
reg = learner.regularizer.regularizationCost(learner.prog)
return [('loss', (xe+reg)), ('crossEnt', xe), ('reg',reg)]
@staticmethod
def accuracy(learner,Y,P,kw):
acc = learner.accuracy(Y,P)
return [('acc',acc)]
@staticmethod
def timing(learner,kw):
"""Return list of timing properties using keyword 'starttime'
"""
return [('time',(time.time()-kw['startTime']))] if 'startTime' in kw else []
@staticmethod
def identification(learner,kw):
"""Return list of identifying properties taken from keywords and learner.
Known keys are:
i = current epoch
k = current minibatch
mode = current mode
"""
result = []
if 'k' in kw: result.append(('minibatch', kw['k']))
if 'i' in kw: result.append(('epoch', kw['i']+1))
if 'i' in kw: result.append(('maxEpoch',learner.epochs))
if 'mode' in kw: result.append((('mode=%s' % (str(kw['mode']))), 1.0))
return result
#TODO: rework to merge results
class EpochTracer(Tracer):
"""Functions to called by a learner after gradient computation for all
modes and parameter updates.
"""
defaultOutputs = [('crossEnt',['avg','tot']),('loss',['tot']),('reg',['avg']),
('time',['min','avg','max','tot']),
('n',['tot'])]
@staticmethod
def silent(learner,ctr,**kw):
"""No output."""
pass
@staticmethod
def cheap(learner,ctr,**kw):
"""Easy-to-compute status message."""
EpochTracer.default(learner,ctr,**kw)
@staticmethod
def default(learner,ctr,**kw):
"""A default status message."""
pairs = Tracer.identification(learner,kw)
for k,prefs in EpochTracer.defaultOutputs:
for pref in prefs:
pairs.append( ((pref + '.' +k), ctr[(k,pref)]) )
pairs.append(('minibatches',ctr['counters']))
print((' '.join([('%s=%g'%(k_v[0],k_v[1])) for k_v in pairs])))
##############################################################################
# Learners
##############################################################################
class Learner(object):
"""Abstract class with some utility functions.."""
# prog pts to db, rules
def __init__(self,prog,regularizer,tracer,epochTracer):
self.prog = prog
self.regularizer = regularizer or NullRegularizer()
self.tracer = tracer or Tracer.default
self.epochTracer = epochTracer or EpochTracer.default
#
# using and measuring performance
#
def predict(self,mode,X,pad=None):
"""Make predictions on a data matrix associated with the given mode."""
if not pad: pad = opfunutil.Scratchpad()
predictFun = self.prog.getPredictFunction(mode)
result = predictFun.eval(self.prog.db, [X], pad)
return result
def datasetPredict(self,dset,copyXs=True):
""" Return predictions on a dataset. """
xDict = {}
yDict = {}
for mode in dset.modesToLearn():
X = dset.getX(mode)
xDict[mode] = X if copyXs else None
try:
#yDict[mode] = self.prog.getPredictFunction(mode).eval(self.prog.db, [X])
yDict[mode] = self.predict(mode,X)
except:
print(("Trouble with mode %s:" % str(mode), sys.exc_info()[:2]))
raise
return dataset.Dataset(xDict,yDict)
@staticmethod
def datasetAccuracy(goldDset,predictedDset):
""" Return accuracy on a dataset relative to gold labels. """
weightedSum = 0.0
totalWeight = 0.0
for mode in goldDset.modesToLearn():
assert predictedDset.hasMode(mode), "Accuracy: Mode '%s' not available in predictedDset" % mode
Y = goldDset.getY(mode)
P = predictedDset.getY(mode)
weight = mutil.numRows(Y)
weightedSum += weight * Learner.accuracy(Y,P)
totalWeight += weight
if totalWeight == 0: return 0
return weightedSum/totalWeight
@staticmethod
def datasetCrossEntropy(goldDset,predictedDset,perExample=True):
""" Return cross entropy on a dataset. """
result = 0.0
for mode in goldDset.modesToLearn():
assert predictedDset.hasMode(mode), "CrossEntropy: Mode '%s' not available in predictedDset" % mode
Y = goldDset.getY(mode)
P = predictedDset.getY(mode)
divisor = mutil.numRows(Y) if perExample else 1.0
result += Learner.crossEntropy(Y,P,perExample=False)/divisor
return result
@staticmethod
def accuracy(Y,P):
"""Evaluate accuracy of predictions P versus labels Y."""
#TODO surely there's a better way of doing this
def allZerosButArgmax(d):
result = NP.zeros_like(d)
result[d.argmax()] = 1.0
return result
n = mutil.numRows(P)
ok = 0.0
for i in range(n):
pi = P.getrow(i)
yi = Y.getrow(i)
ti = mutil.mapData(allZerosButArgmax,pi)
ok += yi.multiply(ti).sum()
return ok/n
@staticmethod
def crossEntropy(Y,P,perExample=False):
"""Compute cross entropy some predications relative to some labels."""
logP = mutil.mapData(NP.log,P)
result = -(Y.multiply(logP).sum())
return result/mutil.numRows(Y) if perExample else result
#
# gradient computation
#
def crossEntropyGrad(self,mode,X,Y,tracerArgs={},pad=None):
"""Compute the parameter gradient associated with softmax
normalization followed by a cross-entropy cost function. If a
scratchpad is passed in, then intermediate results of the
gradient computation will be saved on that scratchpad.
"""
if not pad: pad = opfunutil.Scratchpad()
# More detail: in learning we use a softmax normalization
# followed immediately by a crossEntropy loss, which has a
# simple derivative when combined - see
# http://peterroelants.github.io/posts/neural_network_implementation_intermezzo02/
# So in doing backprop, you don't call backprop on the outer
# function, instead you compute the initial delta of P-Y, the
# derivative for the loss of the (softmax o crossEntropy)
# function, and it pass that delta down to the inner function
# for softMax
# do the prediction, saving intermediate outputs on the scratchpad
predictFun = self.prog.getPredictFunction(mode)
assert isinstance(predictFun,funs.SoftmaxFunction),'crossEntropyGrad specialized to work for softmax normalization'
P = self.predict(mode,X,pad)
# compute gradient
paramGrads = GradAccumulator()
#TODO assert rowSum(Y) = all ones - that's assumed here in
#initial delta of Y-P
predictFun.fun.backprop(Y-P,paramGrads,pad)
# the tracer function may output status, and may also write
# information to the counters in paramGrads
self.tracer(self,paramGrads,Y,P,**tracerArgs)
return paramGrads
#
# parameter updates
#
def meanUpdate(self,functor,arity,delta,n,totalN=0):
#clip the delta vector to avoid exploding gradients
delta = mutil.mapData(lambda d:NP.clip(d,conf.minGradient,conf.maxGradient), delta)
if arity==1:
#for a parameter that is a row-vector, we have one
#gradient per example and we will take the mean
compensation = 1.0 if totalN==0 else float(n)/totalN
return mutil.mean(delta)*compensation
else:
#for a parameter that is a matrix, we have one gradient for the whole matrix
compensation = (1.0/n) if totalN==0 else (1.0/totalN)
return delta*compensation
def applyUpdate(self,paramGrads,rate):
"""Add each gradient to the appropriate param, after scaling by rate,
and clip negative parameters to zero.
"""
paramGrads.fitParameterShapes()
for (functor,arity),delta in list(paramGrads.items()):
m0 = self.prog.db.getParameter(functor,arity)
m1 = m0 + rate * delta
m2 = mutil.mapData(lambda d:NP.clip(d,0.0,NP.finfo('float32').max), m1)
self.prog.db.setParameter(functor,arity,m2)
#
# actual learner implementations
#
class OnePredFixedRateGDLearner(Learner):
    """Batch gradient-descent learner for a single predicate mode.

    Runs a fixed number of epochs at a constant learning rate.
    """
    def __init__(self,prog,epochs=10,rate=0.1,regularizer=None,tracer=None,epochTracer=None):
        super(OnePredFixedRateGDLearner,self).__init__(
            prog,regularizer=regularizer,tracer=tracer,epochTracer=epochTracer)
        self.epochs = epochs
        self.rate = rate
    def train(self,mode,X,Y):
        """Train the parameters for one mode on examples (X,Y)."""
        trainStartTime = time.time()
        for epoch in range(self.epochs):
            epochStart = time.time()
            numExamples = mutil.numRows(X)
            tracerArgs = {'i':epoch,'startTime':epochStart}
            grads = self.crossEntropyGrad(mode,X,Y,tracerArgs=tracerArgs)
            self.regularizer.regularizeParams(self.prog,numExamples)
            self.applyUpdate(grads,self.rate)
class FixedRateGDLearner(Learner):
    """A batch gradient descent learner over all modes in a dataset.

    Runs a fixed number of epochs at a constant learning rate; each
    epoch performs one full-batch update per mode in the dataset.
    """
    def __init__(self,prog,epochs=10,rate=0.1,regularizer=None,tracer=None,epochTracer=None):
        super(FixedRateGDLearner,self).__init__(prog,regularizer=regularizer,tracer=tracer,epochTracer=epochTracer)
        self.epochs=epochs
        self.rate=rate
    def train(self,dset):
        """Train on every mode in dset for self.epochs epochs."""
        trainStartTime = time.time()
        # (removed unused locals: modes/numModes were computed but never used)
        for i in range(self.epochs):
            startTime = time.time()
            epochCounter = GradAccumulator.counter()
            for j,mode in enumerate(dset.modesToLearn()):
                n = mutil.numRows(dset.getX(mode))
                args = {'i':i,'startTime':startTime,'mode':str(mode)}
                try:
                    paramGrads = self.crossEntropyGrad(mode,dset.getX(mode),dset.getY(mode),tracerArgs=args)
                    self.regularizer.regularizeParams(self.prog,n)
                    self.applyUpdate(paramGrads,self.rate)
                    GradAccumulator.accumToCounter(epochCounter,paramGrads.counter)
                except:
                    # bare except is deliberate: report context for *any*
                    # failure, then re-raise unconditionally
                    print(("Unexpected error at %s:" % str(args), sys.exc_info()[:2]))
                    raise
            self.epochTracer(self,epochCounter,i=i,startTime=trainStartTime)
class FixedRateSGDLearner(FixedRateGDLearner):
    """A stochastic (minibatch) gradient descent learner.

    Like FixedRateGDLearner, but each epoch iterates over minibatches
    drawn from the dataset instead of one full batch per mode.
    """
    def __init__(self,prog,epochs=10,rate=0.1,regularizer=None,tracer=None,miniBatchSize=100,epochTracer=None):
        # Pass epochTracer through to the superclass, consistent with the
        # other learners; previously it was silently dropped even though
        # train() below calls self.epochTracer.
        super(FixedRateSGDLearner,self).__init__(
            prog,epochs=epochs,rate=rate,regularizer=regularizer,tracer=tracer,
            epochTracer=epochTracer)
        self.miniBatchSize = miniBatchSize
    def train(self,dset):
        """Train on minibatches from dset for self.epochs epochs."""
        trainStartTime = time.time()
        # (removed unused prelude: modes/n were computed but immediately unused)
        for i in range(self.epochs):
            startTime = time.time()
            epochCounter = GradAccumulator.counter()
            k = 0
            for (mode,X,Y) in dset.minibatchIterator(batchSize=self.miniBatchSize):
                n = mutil.numRows(X)
                k = k+1
                args = {'i':i,'k':k,'startTime':startTime,'mode':mode}
                try:
                    paramGrads = self.crossEntropyGrad(mode,X,Y,tracerArgs=args)
                    self.regularizer.regularizeParams(self.prog,n)
                    self.applyUpdate(paramGrads,self.rate)
                    GradAccumulator.accumToCounter(epochCounter,paramGrads.counter)
                except:
                    # bare except is deliberate: report context, then re-raise
                    print(("Unexpected error at %s:" % str(args), sys.exc_info()[:2]))
                    raise
            self.epochTracer(self,epochCounter,i=i,startTime=trainStartTime)
##############################################################################
# regularizers
##############################################################################
class Regularizer(object):
    """Abstract base class for regularizers.

    Subclasses mutate the program's parameters directly (see L2Regularizer).
    """
    def regularizeParams(self,prog,n):
        """Apply the regularization update for a batch of n examples."""
        # raise instead of `assert False`: asserts are stripped under -O
        raise NotImplementedError('abstract method called')
    def regularizationCost(self,prog):
        """Report the current regularization cost."""
        raise NotImplementedError('abstract method called')
class NullRegularizer(object):
    """No-op regularizer used when no regularization is wanted."""
    def regularizeParams(self,prog,n):
        # intentionally does nothing
        pass
    def regularizationCost(self,prog):
        # nothing is penalized, so the cost is always zero
        return 0.0
class L2Regularizer(Regularizer):
    """L2 regularization that shrinks every parameter toward zero."""
    def __init__(self,regularizationConstant=0.01):
        self.regularizationConstant = regularizationConstant
    def regularizeParams(self,prog,n):
        # multiplicative weight decay: w <- w * (1 - c)
        decay = 1.0 - self.regularizationConstant
        for functor,arity in prog.getParamList():
            weights = prog.db.getParameter(functor,arity)
            prog.db.setParameter(functor,arity,weights * decay)
    def regularizationCost(self,prog):
        # sum of squared parameter entries, scaled by the constant
        total = 0
        for functor,arity in prog.getParamList():
            weights = prog.db.getParameter(functor,arity)
            total += (weights.data * weights.data).sum()
        return total * self.regularizationConstant
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module release metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.0',
    'status': ['preview'],
    'supported_by': 'community',
}
DOCUMENTATION = '''
---
module: bigip_device_sshd
short_description: Manage the SSHD settings of a BIG-IP.
description:
- Manage the SSHD settings of a BIG-IP.
version_added: "2.2"
options:
allow:
description:
- Specifies, if you have enabled SSH access, the IP address or address
range for other systems that can use SSH to communicate with this
system.
choices:
- all
- IP address, such as 172.27.1.10
- IP range, such as 172.27.*.* or 172.27.0.0/255.255.0.0
banner:
description:
- Whether to enable the banner or not.
required: False
choices:
- enabled
- disabled
default: None
banner_text:
description:
- Specifies the text to include on the pre-login banner that displays
when a user attempts to login to the system using SSH.
required: False
default: None
inactivity_timeout:
description:
- Specifies the number of seconds before inactivity causes an SSH
session to log out.
required: False
default: None
log_level:
description:
- Specifies the minimum SSHD message level to include in the system log.
choices:
- debug
- debug1
- debug2
- debug3
- error
- fatal
- info
- quiet
- verbose
required: False
default: None
login:
description:
- Specifies, when checked C(enabled), that the system accepts SSH
communications.
choices:
- enabled
- disabled
required: False
default: None
port:
description:
- Port that you want the SSH daemon to run on.
required: False
default: None
notes:
  - Requires the f5-sdk Python package on the host. This is as easy as pip
    install f5-sdk.
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the banner for the SSHD service from a string
bigip_device_sshd:
banner: "enabled"
banner_text: "banner text goes here"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
delegate_to: localhost
- name: Set the banner for the SSHD service from a file
bigip_device_sshd:
banner: "enabled"
banner_text: "{{ lookup('file', '/path/to/file') }}"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
delegate_to: localhost
- name: Set the SSHD service to run on port 2222
bigip_device_sshd:
password: "secret"
port: 2222
server: "lb.mydomain.com"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
allow:
description: >
Specifies, if you have enabled SSH access, the IP address or address
range for other systems that can use SSH to communicate with this
system.
returned: changed
type: string
sample: "192.0.2.*"
banner:
description: Whether the banner is enabled or not.
returned: changed
type: string
sample: "true"
banner_text:
description: >
Specifies the text included on the pre-login banner that
displays when a user attempts to login to the system using SSH.
returned: changed and success
type: string
sample: "This is a corporate device. Connecting to it without..."
inactivity_timeout:
description: >
    The number of seconds before inactivity causes an SSH
    session to log out.
returned: changed
type: int
sample: "10"
log_level:
description: The minimum SSHD message level to include in the system log.
returned: changed
type: string
sample: "debug"
login:
description: Specifies that the system accepts SSH communications or not.
  returned: changed
type: bool
sample: true
port:
description: Port that you want the SSH daemon to run on.
  returned: changed
type: int
sample: 22
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
    """Maps module arguments to/from the BIG-IP sshd REST resource."""
    # iControl REST attribute name -> module argument name
    api_map = {
        'bannerText': 'banner_text',
        'inactivityTimeout': 'inactivity_timeout',
        'logLevel': 'log_level'
    }
    # attribute names as the BIG-IP API expects them
    api_attributes = [
        'allow', 'banner', 'bannerText', 'inactivityTimeout', 'logLevel',
        'login', 'port'
    ]
    # module-side names that participate in change detection
    updatables = [
        'allow', 'banner', 'banner_text', 'inactivity_timeout', 'log_level',
        'login', 'port'
    ]
    # module-side names reported back to the user
    returnables = [
        'allow', 'banner', 'banner_text', 'inactivity_timeout', 'log_level',
        'login', 'port'
    ]
    def to_return(self):
        """Collect all returnable values, dropping unset ones."""
        result = {name: getattr(self, name) for name in self.returnables}
        return self._filter_params(result)
    def api_params(self):
        """Build the API payload, translating names through api_map."""
        result = {}
        for attr in self.api_attributes:
            if self.api_map is not None and attr in self.api_map:
                source = self.api_map[attr]
            else:
                source = attr
            result[attr] = getattr(self, source)
        return self._filter_params(result)
    @property
    def inactivity_timeout(self):
        timeout = self._values['inactivity_timeout']
        return None if timeout is None else int(timeout)
    @property
    def port(self):
        port = self._values['port']
        return None if port is None else int(port)
    @property
    def allow(self):
        raw = self._values['allow']
        if raw is None:
            return None
        # de-duplicate while coercing each entry to str
        return list({str(entry) for entry in raw})
class ModuleManager(object):
    """Orchestrates reading current SSHD settings and applying changes."""
    def __init__(self, client):
        self.client = client
        self.have = None
        # desired state from the playbook; actual state is loaded in update()
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()
    def _update_changed_options(self):
        """Diff wanted vs. current values; returns True if anything differs."""
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if changed:
            self.changes = Parameters(changed)
            return True
        return False
    def exec_module(self):
        """Entry point: run the update and report results back to Ansible."""
        result = dict()
        try:
            changed = self.update()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
    def read_current_from_device(self):
        """Load the current sshd settings from the BIG-IP."""
        resource = self.client.api.tm.sys.sshd.load()
        result = resource.attrs
        return Parameters(result)
    def update(self):
        """Apply wanted settings; returns True if anything changed."""
        self.have = self.read_current_from_device()
        # NOTE(review): this DHCP guard looks copy-pasted from another
        # device-settings module; the sshd resource is not obviously expected
        # to expose a 'dhcp' attribute -- confirm whether it belongs here.
        if self.have.dhcp:
            raise F5ModuleError(
                # fixed: the two literals previously concatenated without a
                # space, producing "...use ofthis module"
                "DHCP on the mgmt interface must be disabled to make use of "
                "this module"
            )
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update_on_device(self):
        """Push the wanted settings to the BIG-IP sshd resource."""
        params = self.want.api_params()
        resource = self.client.api.tm.sys.sshd.load()
        resource.update(**params)
class ArgumentSpec(object):
    """Defines the Ansible argument spec for the bigip_device_sshd module."""
    def __init__(self):
        # values shared by the 'banner' and 'login' options
        self.choices = ['enabled', 'disabled']
        # valid sshd log levels
        self.levels = [
            'debug', 'debug1', 'debug2', 'debug3', 'error', 'fatal', 'info',
            'quiet', 'verbose'
        ]
        self.supports_check_mode = True
        # every option except 'state' is optional with no default
        common = dict(required=False, default=None)
        self.argument_spec = dict(
            allow=dict(type='list', **common),
            banner=dict(choices=self.choices, **common),
            banner_text=dict(**common),
            inactivity_timeout=dict(type='int', **common),
            log_level=dict(choices=self.levels, **common),
            login=dict(choices=self.choices, **common),
            port=dict(type='int', **common),
            state=dict(
                default='present',
                choices=['present']
            )
        )
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: build the F5 client, run the manager, exit via Ansible."""
    # fail fast if the f5-sdk dependency is missing
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )
    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        # report module-level failures through Ansible's JSON interface
        client.module.fail_json(msg=str(e))
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Document matcher for Search API stub.
DocumentMatcher provides an approximation of the Search API's query matching.
"""
import datetime
from google.appengine.datastore import document_pb
from google.appengine._internal.antlr3 import tree
from google.appengine.api.search import geo_util
from google.appengine.api.search import query_parser
from google.appengine.api.search import QueryParser
from google.appengine.api.search import search_util
from google.appengine.api.search.stub import simple_tokenizer
from google.appengine.api.search.stub import tokens
MSEC_PER_DAY = 86400000
class ExpressionTreeException(Exception):
  """An error occurred while analyzing/translating the expression parse tree."""

  def __init__(self, msg):
    super(ExpressionTreeException, self).__init__(msg)
class DistanceMatcher(object):
  """A class to match on geo distance.

  Holds a reference geopoint and a distance threshold; IsMatch tests
  whether any of a field's geo values satisfies the requested comparison.
  """

  def __init__(self, geopoint, distance):
    # geopoint: a geo_util.LatLng; distance: threshold (units per LatLng
    # subtraction -- presumably meters, TODO confirm)
    self._geopoint = geopoint
    self._distance = distance

  def _CheckOp(self, op):
    """Reject comparison operators that are meaningless for Geo values."""
    if op == QueryParser.EQ or op == QueryParser.HAS:
      raise ExpressionTreeException('Equality comparison not available for Geo type')
    if op == QueryParser.NE:
      raise ExpressionTreeException('!= comparison operator is not available')
    if op not in (QueryParser.GT, QueryParser.GE, QueryParser.LESSTHAN, QueryParser.LE):
      raise search_util.UnsupportedOnDevError(
          'Operator %s not supported for distance matches on development server.'
          % str(op))

  def _IsDistanceMatch(self, geopoint, op):
    """True iff geopoint's distance from the query point satisfies op."""
    distance = geopoint - self._geopoint
    # NOTE: GT/GE and LESSTHAN/LE are each collapsed to a single inclusive
    # comparison -- a dev-server approximation kept as-is.
    if op == QueryParser.GT or op == QueryParser.GE:
      return distance >= self._distance
    if op == QueryParser.LESSTHAN or op == QueryParser.LE:
      return distance <= self._distance
    # fixed: use the call form of raise; the old "raise E, msg" statement
    # is Python-2-only syntax (the call form works in both 2 and 3)
    raise AssertionError('unexpected op %s' % str(op))

  def IsMatch(self, field_values, op):
    """True iff any geo field value satisfies the distance comparison."""
    self._CheckOp(op)
    for field_value in field_values:
      geo_pb = field_value.geo()
      geopoint = geo_util.LatLng(geo_pb.lat(), geo_pb.lng())
      if self._IsDistanceMatch(geopoint, op):
        return True
    return False
class DocumentMatcher(object):
  """A class to match documents with a query.

  Approximates production Search API matching using the stub's inverted
  index.  NOTE: written for Python 2 (e.g. len(zip(...)) in _MatchPhrase
  relies on zip() returning a list).
  """
  def __init__(self, query, inverted_index):
    # query: root node of a parsed query tree; inverted_index: the stub's
    # token -> postings index for the searched documents.
    self._query = query
    self._inverted_index = inverted_index
    self._parser = simple_tokenizer.SimpleTokenizer()
  def _PostingsForToken(self, token):
    """Returns the postings for the token."""
    return self._inverted_index.GetPostingsForToken(token)
  def _PostingsForFieldToken(self, field, value):
    """Returns postings for the value occurring in the given field."""
    value = simple_tokenizer.NormalizeString(value)
    return self._PostingsForToken(
        tokens.Token(chars=value, field_name=field))
  def _MatchRawPhraseWithRawAtom(self, field_text, phrase_text):
    # ATOM fields compare as whole tokenized values, not word-by-word.
    tokenized_phrase = self._parser.TokenizeText(
        phrase_text, input_field_type=document_pb.FieldValue.ATOM)
    tokenized_field_text = self._parser.TokenizeText(
        field_text, input_field_type=document_pb.FieldValue.ATOM)
    return tokenized_phrase == tokenized_field_text
  def _MatchPhrase(self, field, match, document):
    """Match a textual field with a phrase query node."""
    field_text = field.value().string_value()
    phrase_text = query_parser.GetPhraseQueryNodeText(match)
    if field.value().type() == document_pb.FieldValue.ATOM:
      return self._MatchRawPhraseWithRawAtom(field_text, phrase_text)
    if not phrase_text:
      return False
    phrase = self._parser.TokenizeText(
        search_util.RemoveAccentsNfkd(phrase_text))
    field_text = self._parser.TokenizeText(
        search_util.RemoveAccentsNfkd(field_text))
    if not phrase:
      return True
    # Locate this document's posting for the first phrase token; its
    # positions give the candidate starting offsets for the phrase.
    posting = None
    for post in self._PostingsForFieldToken(field.name(), phrase[0].chars):
      if post.doc_id == document.id():
        posting = post
        break
    if not posting:
      return False
    def ExtractWords(token_list):
      return (token.chars for token in token_list)
    for position in posting.positions:
      # Python 2: zip() returns a list here, so len() below is valid; the
      # slice aligns document words at this position with the phrase words.
      match_words = zip(ExtractWords(field_text[position:]),
                        ExtractWords(phrase))
      if len(match_words) != len(phrase):
        continue
      match = True
      for doc_word, match_word in match_words:
        if doc_word != match_word:
          match = False
      if match:
        return True
    return False
  def _MatchTextField(self, field, match, document):
    """Check if a textual field matches a query tree node."""
    if match.getType() == QueryParser.FUZZY:
      return self._MatchTextField(field, match.getChild(0), document)
    if match.getType() == QueryParser.VALUE:
      if query_parser.IsPhrase(match):
        return self._MatchPhrase(field, match, document)
      if field.value().type() == document_pb.FieldValue.ATOM:
        return (field.value().string_value() ==
                query_parser.GetQueryNodeText(match))
      query_tokens = self._parser.TokenizeText(
          query_parser.GetQueryNodeText(match))
      if not query_tokens:
        return True
      # Multi-token values must match every token independently.
      if len(query_tokens) > 1:
        def QueryNode(token):
          return query_parser.CreateQueryNode(
              search_util.RemoveAccentsNfkd(token.chars), QueryParser.TEXT)
        return all(self._MatchTextField(field, QueryNode(token), document)
                   for token in query_tokens)
      token_text = search_util.RemoveAccentsNfkd(query_tokens[0].chars)
      matching_docids = [
          post.doc_id for post in self._PostingsForFieldToken(
              field.name(), token_text)]
      return document.id() in matching_docids
    # Unwrap "global EQ/HAS value" children so conjunctions/disjunctions
    # compare the field against the value node rather than the whole
    # comparison node.
    def ExtractGlobalEq(node):
      op = node.getType()
      if ((op == QueryParser.EQ or op == QueryParser.HAS) and
          len(node.children) >= 2):
        if node.children[0].getType() == QueryParser.GLOBAL:
          return node.children[1]
      return node
    if match.getType() == QueryParser.CONJUNCTION:
      return all(self._MatchTextField(field, ExtractGlobalEq(child), document)
                 for child in match.children)
    if match.getType() == QueryParser.DISJUNCTION:
      return any(self._MatchTextField(field, ExtractGlobalEq(child), document)
                 for child in match.children)
    if match.getType() == QueryParser.NEGATION:
      raise ExpressionTreeException('Unable to compare \"' + field.name() +
                                    '\" with negation')
    return False
  def _GetFieldName(self, field):
    """Get the field name of the given field node."""
    if isinstance(field, tree.CommonTree):
      return query_parser.GetQueryNodeText(field)
    return field
  def _CheckValidDateComparison(self, field_name, match):
    """Check if match is a valid date value."""
    if match.getType() == QueryParser.VALUE:
      try:
        match_val = query_parser.GetPhraseQueryNodeText(match)
        datetime.datetime.strptime(match_val, '%Y-%m-%d')
      except ValueError:
        raise ExpressionTreeException('Unable to compare "%s" with "%s"' %
                                      (field_name, match_val))
  def _MatchDateField(self, field, match, operator, document):
    """Check if a date field matches a query tree node."""
    # An unparseable date comparison simply fails to match here; the same
    # check raises to the caller from _CheckMatch for schema-wide DATE fields.
    try:
      self._CheckValidDateComparison(field.name(), match)
    except ExpressionTreeException:
      return False
    return self._MatchComparableField(
        field, match, _DateStrToDays, operator, document)
  def _MatchNumericField(self, field, match, operator, document):
    """Check if a numeric field matches a query tree node."""
    return self._MatchComparableField(field, match, float, operator, document)
  def _MatchGeoField(self, field, matcher, operator, document):
    """Check if a geo field matches a query tree node."""
    if not isinstance(matcher, DistanceMatcher):
      return False
    # NOTE(review): the comprehension below shadows the local "field"
    # (name, then document_pb.Field); works, but fragile.
    field = self._GetFieldName(field)
    values = [field.value() for field in
              search_util.GetAllFieldInDocument(document, field) if
              field.value().type() == document_pb.FieldValue.GEO]
    return matcher.IsMatch(values, operator)
  def _MatchComparableField(
      self, field, match, cast_to_type, op, document):
    """A generic method to test matching for comparable types.
    Comparable types are defined to be anything that supports <, >, <=, >=, ==.
    For our purposes, this is numbers and dates.
    Args:
      field: The document_pb.Field to test
      match: The query node to match against
      cast_to_type: The type to cast the node string values to
      op: The query node type representing the type of comparison to perform
      document: The document that the field is in
    Returns:
      True iff the field matches the query.
    Raises:
      UnsupportedOnDevError: Raised when an unsupported operator is used, or
      when the query node is of the wrong type.
      ExpressionTreeException: Raised when a != inequality operator is used.
    """
    field_val = cast_to_type(field.value().string_value())
    if match.getType() == QueryParser.VALUE:
      try:
        match_val = cast_to_type(query_parser.GetPhraseQueryNodeText(match))
      except ValueError:
        return False
    else:
      return False
    if op == QueryParser.EQ or op == QueryParser.HAS:
      return field_val == match_val
    if op == QueryParser.NE:
      raise ExpressionTreeException('!= comparison operator is not available')
    if op == QueryParser.GT:
      return field_val > match_val
    if op == QueryParser.GE:
      return field_val >= match_val
    if op == QueryParser.LESSTHAN:
      return field_val < match_val
    if op == QueryParser.LE:
      return field_val <= match_val
    raise search_util.UnsupportedOnDevError(
        'Operator %s not supported for numerical fields on development server.'
        % match.getText())
  def _CheckInvalidNumericComparison(self, match, document):
    """Check if this is an invalid numeric comparison.
    Valid numeric comparisons are "numeric_field OP numeric_constant" where OP
    is one of [>, <, >=, <=, =, :].
    Args:
      match: The right hand side argument of the operator.
      document: The document we are checking for a match.
    Raises:
      ExpressionTreeException: when right hand side of numeric comparison is not
      a numeric constant.
    """
    match_text = query_parser.GetQueryNodeText(match)
    match_fields = search_util.GetFieldInDocument(document, match_text,
                                                  document_pb.FieldValue.NUMBER)
    if match_fields:
      raise ExpressionTreeException(
          'Expected numeric constant, found \"' + match_text + '\"')
  def _MatchAnyField(self, field, match, operator, document):
    """Check if a field matches a query tree.
    Args:
      field: the name of the field, or a query node containing the field.
      match: A query node to match the field with.
      operator: The query node type corresponding to the type of match to
        perform (eg QueryParser.EQ, QueryParser.GT, etc).
      document: The document to match.
    Raises:
      ExpressionTreeException: when != operator is used or right hand side of
      numeric inequality is not a numeric constant.
    """
    fields = search_util.GetAllFieldInDocument(document,
                                               self._GetFieldName(field))
    self._CheckInvalidNumericComparison(match, document)
    return any(self._MatchField(f, match, operator, document) for f in fields)
  def _MatchField(self, field, match, operator, document):
    """Check if a field matches a query tree.
    Args:
      field: a document_pb.Field instance to match.
      match: A query node to match the field with.
      operator: The a query node type corresponding to the type of match to
        perform (eg QueryParser.EQ, QueryParser.GT, etc).
      document: The document to match.
    """
    if field.value().type() in search_util.TEXT_DOCUMENT_FIELD_TYPES:
      # text fields only support equality/containment comparisons
      if operator != QueryParser.EQ and operator != QueryParser.HAS:
        return False
      return self._MatchTextField(field, match, document)
    if field.value().type() in search_util.NUMBER_DOCUMENT_FIELD_TYPES:
      return self._MatchNumericField(field, match, operator, document)
    if field.value().type() == document_pb.FieldValue.DATE:
      return self._MatchDateField(field, match, operator, document)
    # geo fields are matched only via the distance() function path
    # (_MatchFunction -> _MatchGeoField), never directly here
    if field.value().type() == document_pb.FieldValue.GEO:
      return False
    type_name = document_pb.FieldValue.ContentType_Name(
        field.value().type()).lower()
    raise search_util.UnsupportedOnDevError(
        'Matching fields of type %s is unsupported on dev server (searched for '
        'field %s)' % (type_name, field.name()))
  def _MatchGlobal(self, match, document):
    """True iff any field of the document matches the global query node."""
    for field in document.field_list():
      try:
        if self._MatchAnyField(field.name(), match, QueryParser.EQ, document):
          return True
      except search_util.UnsupportedOnDevError:
        # fields of unsupported types are simply skipped for global matches
        pass
    return False
  def _ResolveDistanceArg(self, node):
    """Resolve a distance() argument to a field name string or a LatLng."""
    if node.getType() == QueryParser.VALUE:
      return query_parser.GetQueryNodeText(node)
    if node.getType() == QueryParser.FUNCTION:
      name, args = node.children
      if name.getText() == 'geopoint':
        lat, lng = (float(query_parser.GetQueryNodeText(v)) for v in args.children)
        return geo_util.LatLng(lat, lng)
    return None
  def _MatchFunction(self, node, match, operator, document):
    """Dispatch a function-call query node; only distance() is supported."""
    name, args = node.children
    if name.getText() == 'distance':
      x, y = args.children
      x, y = self._ResolveDistanceArg(x), self._ResolveDistanceArg(y)
      # normalize argument order to (field name, LatLng);
      # NOTE: basestring is Python-2-only
      if isinstance(x, geo_util.LatLng) and isinstance(y, basestring):
        x, y = y, x
      if isinstance(x, basestring) and isinstance(y, geo_util.LatLng):
        distance = float(query_parser.GetQueryNodeText(match))
        matcher = DistanceMatcher(y, distance)
        return self._MatchGeoField(x, matcher, operator, document)
    return False
  def _IsHasGlobalValue(self, node):
    """True iff node has the shape "GLOBAL HAS VALUE"."""
    if node.getType() == QueryParser.HAS and len(node.children) == 2:
      if (node.children[0].getType() == QueryParser.GLOBAL and
          node.children[1].getType() == QueryParser.VALUE):
        return True
    return False
  def _MatchGlobalPhrase(self, node, document):
    """Check if a document matches a parsed global phrase."""
    if not all(self._IsHasGlobalValue(child) for child in node.children):
      return False
    # rebuild the phrase text from the sequence's value children
    value_nodes = (child.children[1] for child in node.children)
    phrase_text = ' '.join(
        (query_parser.GetQueryNodeText(node) for node in value_nodes))
    for field in document.field_list():
      if self._MatchRawPhraseWithRawAtom(field.value().string_value(),
                                         phrase_text):
        return True
    return False
  def _CheckMatch(self, node, document):
    """Check if a document matches a query tree.
    Args:
      node: the query node to match
      document: the document to match
    Returns:
      True iff the query node matches the document.
    Raises:
      ExpressionTreeException: when != operator is used or numeric value is used
      in comparison for DATE field.
    """
    if node.getType() == QueryParser.SEQUENCE:
      result = all(self._CheckMatch(child, document) for child in node.children)
      return result or self._MatchGlobalPhrase(node, document)
    if node.getType() == QueryParser.CONJUNCTION:
      return all(self._CheckMatch(child, document) for child in node.children)
    if node.getType() == QueryParser.DISJUNCTION:
      return any(self._CheckMatch(child, document) for child in node.children)
    if node.getType() == QueryParser.NEGATION:
      return not self._CheckMatch(node.children[0], document)
    if node.getType() in query_parser.COMPARISON_TYPES:
      if node.getType() == QueryParser.NE:
        raise ExpressionTreeException('!= comparison operator is not available')
      lhs, match = node.children
      if lhs.getType() == QueryParser.GLOBAL:
        return self._MatchGlobal(match, document)
      elif lhs.getType() == QueryParser.FUNCTION:
        return self._MatchFunction(lhs, match, node.getType(), document)
      # if the schema says every occurrence of this field is a DATE, the
      # comparison value must parse as a date before matching is attempted
      schema = self._inverted_index.GetSchema()
      field_name = self._GetFieldName(lhs)
      if field_name in schema:
        field_type_list = schema[field_name].type_list()
        if all(f == document_pb.FieldValue.DATE for f in field_type_list):
          self._CheckValidDateComparison(field_name, match)
      return self._MatchAnyField(lhs, match, node.getType(), document)
    return False
  def Matches(self, document):
    """True iff the document matches this matcher's query."""
    return self._CheckMatch(self._query, document)
  def FilterDocuments(self, documents):
    """Lazily filter documents down to those matching the query."""
    return (doc for doc in documents if self.Matches(doc))
def _DateStrToDays(date_str):
  """Convert a serialized date string into days since the epoch."""
  parsed = search_util.DeserializeDate(date_str)
  return search_util.EpochTime(parsed) / MSEC_PER_DAY
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from botocore.vendored.requests.exceptions import ReadTimeout
from py_swf.errors import NoTaskFound
__all__ = ['DecisionClient', 'DecisionTask']
# Lightweight record describing one polled decision task.
DecisionTask = namedtuple('DecisionTask', 'events task_token workflow_id workflow_run_id workflow_type')
"""Contains the metadata to execute a decision task.
See the response syntax in :meth:`~SWF.Client.poll_for_decision_task`.
"""
def nametuplefy(thing):
    """Recursively turn dicts (and dicts nested inside lists) into namedtuples.

    Dict keys must be strings that are valid Python identifiers, since they
    become namedtuple field names.  Lists are rebuilt with each element
    converted; any other value is returned unchanged.
    """
    # isinstance instead of type() ==: also handles dict/list subclasses
    if isinstance(thing, dict):
        # Only supports string keys
        Dict = namedtuple('Dict', ' '.join(thing.keys()))
        return Dict(**{key: nametuplefy(value) for key, value in thing.items()})
    if isinstance(thing, list):
        return [nametuplefy(item) for item in thing]
    return thing
class DecisionClient(object):
"""A client that provides a pythonic API for polling and responding to decision tasks through an SWF boto3 client.
:param decision_config: Contains SWF values commonly used when making SWF api calls.
:type decision_config: :class:`~py_swf.config_definitions.DecisionConfig`
:param boto_client: A raw SWF boto3 client.
:type boto_client: :class:`~SWF.Client`
"""
def __init__(self, decision_config, boto_client):
self.decision_config = decision_config
self.boto_client = boto_client
def poll(self, identity=None, use_raw_event_history=False):
"""Opens a connection to AWS and long-polls for decision tasks.
When a decision is available, this function will return with exactly one decision task to execute.
Only returns a contiguous subset of the most recent events.
If you want to grab the entire history for a workflow, use :meth:`~py_swf.decision.DecisionClient.walk_execution_history`
Passthrough to :meth:`~SWF.Client.poll_for_decision_task`.
:param identity: A freeform text that identifies the client that performed the longpoll. Useful for debugging history.
:type identity: string
:param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
Otherwise attempts to turn dictionaries into namedtuples recursively.
:type use_raw_event_history: bool
:return: A decision task to execute.
:rtype: DecisionTask
:raises py_swf.errors.NoTaskFound: Raised when polling for a decision task times out without receiving any tasks.
"""
kwargs = dict(
domain=self.decision_config.domain,
reverseOrder=True,
taskList={
'name': self.decision_config.task_list,
},
)
# boto doesn't like None values for optional kwargs
if identity is not None:
kwargs['identity'] = identity
try:
results = self.boto_client.poll_for_decision_task(
**kwargs
)
except ReadTimeout as e:
raise NoTaskFound(e)
# Sometimes SWF gives us an incomplete response, ignore these.
if not results.get('taskToken', None):
raise NoTaskFound('Received results with no taskToken')
events = results['events']
if not use_raw_event_history:
events = nametuplefy(events)
return DecisionTask(
events=events,
task_token=results['taskToken'],
workflow_id=results['workflowExecution']['workflowId'],
workflow_run_id=results['workflowExecution']['runId'],
workflow_type=results['workflowType'],
)
def walk_execution_history(
self,
workflow_id,
workflow_run_id,
reverse_order=True,
use_raw_event_history=False,
maximum_page_size=1000,
):
"""Lazily walks through the entire workflow history for a given workflow_id. This will make successive calls
to SWF on demand when pagination is needed.
See :meth:`~SWF.Client.get_workflow_execution_history` for more information.
:param workflow_id: The workflow_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
:type identity: string
:param workflow_run_id: The workflow_run_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
:type identity: string
:param reverse_order: Passthru for reverseOrder to :meth:`~SWF.Client.get_workflow_execution_history`
:type identity: bool
:param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
Otherwise attempts to turn dictionaries into namedtuples recursively.
:type use_raw_event_history: bool
:param maximum_page_size: Passthru for maximumPageSize to :meth:`~SWF.Client.get_workflow_execution_history`
:type identity: int
:return: A generator that returns successive elements in the workflow execution history.
:rtype: collections.Iterable
"""
kwargs = dict(
domain=self.decision_config.domain,
reverseOrder=reverse_order,
execution=dict(
workflowId=workflow_id,
runId=workflow_run_id,
),
maximumPageSize=maximum_page_size,
)
while True:
results = self.boto_client.get_workflow_execution_history(
**kwargs
)
next_page_token = results.get('nextPageToken', None)
events = results['events']
for event in events:
if not use_raw_event_history:
event = nametuplefy(event)
yield event
if next_page_token is None:
break
kwargs['nextPageToken'] = next_page_token
    def finish_decision_with_activity(self, task_token, activity_id, activity_name, activity_version, activity_input):
        """Responds to a given decision task's task_token to schedule an activity task to run.
        Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.
        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type task_token: string
        :param activity_id: A unique identifier for the activity task.
        :type activity_id: string
        :param activity_name: Which activity name to execute.
        :type activity_name: string
        :param activity_version: Version of the activity name.
        :type activity_version: string
        :param activity_input: Freeform text of the input for the activity
        :type activity_input: string
        :return: None
        :rtype: NoneType
        """
        # Build a ScheduleActivityTask decision from the arguments plus the
        # task list and timeout settings carried by this client's decision_config.
        activity_task = build_activity_task(
            activity_id,
            activity_name,
            activity_version,
            activity_input,
            self.decision_config,
        )
        self.boto_client.respond_decision_task_completed(
            taskToken=task_token,
            decisions=[activity_task],
        )
    def finish_workflow(self, task_token, result):
        """Responds to a given decision task's task_token to finish and terminate the workflow.
        Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.
        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type task_token: string
        :param result: Freeform text that represents the final result of the workflow.
        :type result: string
        :return: None
        :rtype: NoneType
        """
        # A CompleteWorkflowExecution decision closes the workflow with `result`.
        workflow_complete = build_workflow_complete(result)
        self.boto_client.respond_decision_task_completed(
            taskToken=task_token,
            decisions=[workflow_complete],
        )
def build_workflow_complete(result):
    """Build a CompleteWorkflowExecution decision dict for the SWF API.

    :param result: Freeform text recorded as the workflow's final result.
    :return: A decision dictionary accepted by respond_decision_task_completed.
    """
    attributes = {'result': result}
    return {
        'decisionType': 'CompleteWorkflowExecution',
        'completeWorkflowExecutionDecisionAttributes': attributes,
    }
def build_activity_task(activity_id, activity_name, activity_version, input, decision_config):
    """Build a ScheduleActivityTask decision dict for the SWF API.

    The task list and all four timeout values come from ``decision_config``;
    SWF expects timeouts as strings, so they are stringified here.

    :param activity_id: Unique identifier for the scheduled activity task.
    :param activity_name: Name of the activity type to run.
    :param activity_version: Version of the activity type.
    :param input: Freeform input text handed to the activity.
    :param decision_config: Config object carrying task_list and timeouts.
    :return: A decision dictionary accepted by respond_decision_task_completed.
    """
    attributes = {
        'activityType': {
            'name': activity_name,
            'version': activity_version,
        },
        'activityId': activity_id,
        'input': input,
        'taskList': {
            'name': decision_config.task_list,
        },
    }
    timeouts = {
        'scheduleToCloseTimeout': decision_config.schedule_to_close_timeout,
        'scheduleToStartTimeout': decision_config.schedule_to_start_timeout,
        'startToCloseTimeout': decision_config.start_to_close_timeout,
        'heartbeatTimeout': decision_config.heartbeat_timeout,
    }
    for timeout_name, timeout_value in timeouts.items():
        attributes[timeout_name] = str(timeout_value)
    return {
        'decisionType': 'ScheduleActivityTask',
        'scheduleActivityTaskDecisionAttributes': attributes,
    }
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
# Re-export private helpers from tensorflow.python.training.input so the
# bucketing implementation below can reuse the core input-pipeline machinery:
# sparse-tensor (de)serialization, dtype/shape inference, and queue selection.
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
  """Converts inputs to tensors and rejects an empty input list."""
  converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in bucket().")
  return converted
def bucket(tensors,
           which_bucket,
           batch_size,
           num_buckets,
           num_threads=1,
           capacity=32,
           shapes=None,
           dynamic_pad=False,
           allow_smaller_final_batch=False,
           keep_input=None,
           shared_name=None,
           name=None):
  """Lazy bucketing of input tensors according to `which_bucket`.
  The argument `tensors` can be a list or a dictionary of tensors.
  The value returned by the function will be of the same type
  as `tensors`.
  The tensors entering this function are put into the bucket given by
  `which_bucket`.  Each bucket has its own queue.  When a bucket contains
  `batch_size` elements, this minibatch is pushed onto a top queue.  The
  tensors returned from this function are a the result of dequeueing the
  next minibatch from this top queue.
  This function is implemented using several queues. A `QueueRunner` for the
  queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  As the returned tensors are the result of of a dequeue operation, evaluating
  them will throw a `tf.errors.OutOfRangeError` when the input queue is
  exhausted.  If these tensors are feeding another input queue, its queue runner
  will catch this exception, however, if they are used in your main thread
  you are responsible for catching this yourself.
  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors` must have fully-defined shapes. `ValueError` will be
  raised if neither of these conditions holds.
  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have shape `None`.
  In this case, for each enqueue the dimensions with value `None`
  may have a variable length; upon dequeue, the output tensors will be padded
  on the right to the maximum shape of the tensors in the current minibatch.
  For numbers, this padding takes value 0.  For strings, this padding is
  the empty string.  See `PaddingFIFOQueue` for more info.
  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queues are closed and there are not enough
  elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `get_shape()` method will have a 0th `Dimension` value of `None`, and
  operations that depend on fixed batch_size would fail.
  Args:
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket.  Nested lists are not supported.
    which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
    batch_size: The new batch size pulled from the queue
      (python int or int32 scalar).
    num_buckets: A python integer, the number of buckets.
    num_threads: An integer.  The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: (Optional).  A `bool` scalar Tensor.  If provided, this tensor
      controls whether the input is added to the queue or not.  If it evaluates
      `True`, then `tensors` are added to the bucket; otherwise they are
      dropped.  This tensor essentially acts as a filtering mechanism.
      The default behavior is to assume `keep_input=True`.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.
  Returns:
    A tuple `(bucket, outputs)` where `bucket` is
    a `int32` scalar tensor and `outputs` is a list or
    dictionary of batched outputs corresponding to elements of `tensors`.
    Every step will receive a new bucket of outputs.
  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "bucket", tensor_list) as name:
    tensor_list = _validate_bucket(tensor_list)
    # Serialize SparseTensors into dense tensors so they can flow through the
    # queues; they are restored to SparseTensors after the final dequeue.
    (tensor_list, sparse_info) = _store_sparse_tensors(
        tensor_list, enqueue_many=False)

    # Round-trip batch_size to a tensor, and possibly back
    batch_size = ops.convert_to_tensor(
        batch_size, dtype=dtypes.int32, name="batch_size")
    static_batch_size = tensor_util.constant_value(batch_size)
    batch_size = (
        static_batch_size if static_batch_size is not None else batch_size)

    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many=False)

    which_bucket = ops.convert_to_tensor(
        which_bucket, dtype=dtypes.int32, name="which_bucket")

    # One underlying queue per bucket; each gets a distinct shared_name suffix
    # so buckets do not collide when sharing queues across sessions.
    queue_creator = _which_queue(dynamic_pad)
    bucket_queues = []
    for i in range(num_buckets):
      shared_name_i = (
          "%s_%d" % (shared_name, i) if shared_name is not None else None)
      bucket_queues.append(
          queue_creator(capacity=capacity,
                        dtypes=types,
                        shapes=shapes,
                        shared_name=shared_name_i, name="bucket_queue_%d" % i))

    # With allow_smaller_final_batch the 0th dimension of the batched shapes
    # is unknown (None), because the last batch may be short.
    maybe_static_batch_size = (
        None if allow_smaller_final_batch else static_batch_size)

    bucket_shapes = [tensor_shape.vector(maybe_static_batch_size).concatenate(s)
                     for s in bucket_queues[0].shapes]
    # top_queue is a PaddingFIFOQueue even if the bucket queues are regular FIFO
    # queues because if we use allow_smaller_final_batch, shapes will
    # contain Nones in their first entry; as a result, a regular
    # FIFOQueue would die when being passed shapes that are not fully defined.
    top_queue = data_flow_ops.PaddingFIFOQueue(
        capacity=capacity,
        dtypes=[dtypes.int32] + types,
        shapes=[tensor_shape.scalar()] + bucket_shapes,
        shared_name=shared_name, name="top_queue")

    def enqueue_which():
      # Dispatch the input element to exactly one bucket queue, chosen by
      # comparing which_bucket against every bucket index; the non-matching
      # branches are no-ops.
      def enqueue_single(i):
        return bucket_queues[i].enqueue(tensor_list)
      enqueues = [
          control_flow_ops.cond(
              math_ops.equal(which_bucket, i),
              functools.partial(enqueue_single, i),
              control_flow_ops.no_op)
          for i in range(num_buckets)]
      return control_flow_ops.group(*enqueues, name="group_enqueues")

    if keep_input is not None:
      # TODO(ebrevdo): Expand keep_input param to core training
      # methods, and pipe through to _store_sparse_tensors; so
      # that expensive serialization is guarded by keep_input.
      maybe_enqueue = control_flow_ops.cond(
          keep_input,
          enqueue_which,
          control_flow_ops.no_op)
    else:
      maybe_enqueue = enqueue_which()

    # One copy of the enqueue op per thread of the top queue's QueueRunner.
    bucket_enqueue_ops = [maybe_enqueue] * num_threads

    if allow_smaller_final_batch:
      which_dequeue = lambda q: q.dequeue_up_to
    else:
      which_dequeue = lambda q: q.dequeue_many

    # Each bucket queue feeds complete minibatches, tagged with the bucket
    # index, into the shared top queue.
    enqueues_to_top = [
        top_queue.enqueue(
            [constant_op.constant(i)] +
            which_dequeue(q)(batch_size, name="read_bucket_%d" % i),
            name="enqueue_from_bucket_%d" % i)
        for i, q in enumerate(bucket_queues)]

    for i, q in enumerate(bucket_queues):
      queue_runner.add_queue_runner(queue_runner.QueueRunner(
          q, [enqueues_to_top[i]],
          queue_closed_exception_types=(
              errors.OutOfRangeError, errors.CancelledError)))
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        top_queue, bucket_enqueue_ops,
        queue_closed_exception_types=(
            errors.OutOfRangeError, errors.CancelledError)))

    for q in bucket_queues:
      # Fixed: this per-bucket summary previously reported top_queue.size()
      # for every bucket instead of the size of the bucket's own queue.
      logging_ops.scalar_summary(
          "bucket/%s/size" % q.name,
          math_ops.cast(q.size(), dtypes.float32))
    logging_ops.scalar_summary(
        "bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
        math_ops.cast(top_queue.size(), dtypes.float32) * (1. / capacity))

    dequeued = top_queue.dequeue(name="dequeue_top")
    which_bucket_dequeued = dequeued[0]
    dequeued = dequeued[1:]
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
def bucket_by_sequence_length(input_length,
                              tensors,
                              batch_size,
                              bucket_boundaries,
                              num_threads=1,
                              capacity=32,
                              shapes=None,
                              dynamic_pad=False,
                              allow_smaller_final_batch=False,
                              keep_input=None,
                              shared_name=None,
                              name=None):
  """Lazy bucketing of inputs according to their length.
  This method calls `tf.contrib.training.bucket` under the hood, after first
  subdividing the bucket boundaries into separate buckets and identifying which
  bucket the given `input_length` belongs to.  See the documentation for
  `which_bucket` for details of the other arguments.
  Args:
    input_length: `int32` scalar `Tensor`, the sequence length of tensors.
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket.  Nested lists are not supported.
    batch_size: The new batch size pulled from the queue
      (python int or int32 scalar).
    bucket_boundaries: int list, increasing non-negative numbers.
      The edges of the buckets to use when bucketing tensors.  Two extra buckets
      are created, one for `input_length < bucket_boundaries[0]` and
      one for `input_length >= bucket_boundaries[-1]`.
    num_threads: An integer.  The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: (Optional).  A `bool` scalar Tensor.  If provided, this tensor
      controls whether the input is added to the queue or not.  If it evaluates
      `True`, then `tensors` are added to the bucket; otherwise they are
      dropped.  This tensor essentially acts as a filtering mechanism.
      The default behavior is to assume `keep_input=True`.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.
  Returns:
    A tuple `(sequence_length, outputs)` where `sequence_length` is
    a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
    of batched, bucketed, outputs corresponding to elements of `tensors`.
  Raises:
    TypeError: if `bucket_boundaries` is not a list of python integers.
    ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values.
  """
  tensor_list = _as_tensor_list(tensors)
  if not isinstance(bucket_boundaries, (list, tuple)):
    raise TypeError(
        "bucket_boundaries must be a list or tuple, but received: %s"
        % bucket_boundaries)
  if not bucket_boundaries:
    raise ValueError("bucket_boundaries must not be empty")
  # NOTE(review): when bucket_boundaries has exactly one element the zip below
  # is empty, so that single element's type is never checked -- confirm intent.
  for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
    if not isinstance(s, int) or not isinstance(e, int):
      raise TypeError(
          "bucket boundaries must be integers, but saw: %s and %s" % (s, e))
    if s >= e:
      raise ValueError(
          "Buckets must contain sequential increasing lengths, but saw: "
          "%d before %d" % (s, e))

  with ops.name_scope(name, "bucket_by_sequence_length",
                      [input_length] + tensor_list) as name:
    input_length = ops.convert_to_tensor(
        input_length, dtype=dtypes.int32, name="input_length")
    # Bucketing conditions are:
    #   l < b[0]
    #   b[0] <= l < b[1]
    #   b[1] <= l < b[2]
    #   ...
    #   b[N-2] <= l < b[N-1]
    #   b[N-1] <= l
    # Equivalent to:
    #   [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
    buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
    buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
    # One boolean per bucket; exactly one condition is True for a given length,
    # and reduce_min over where() extracts that bucket's index.
    conditions_c = math_ops.logical_and(
        math_ops.less_equal(buckets_min, input_length),
        math_ops.less(input_length, buckets_max))
    which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
    which_bucket = math_ops.to_int32(which_bucket)

    if shapes is not None:
      # input_length is bucketed alongside the user tensors, so prepend its
      # scalar shape to the user-provided shapes.
      shapes = [tensor_shape.scalar()] + shapes

    _, dequeued = bucket(
        tensors=[input_length] + tensor_list,
        which_bucket=which_bucket,
        batch_size=batch_size,
        num_buckets=len(bucket_boundaries) + 1,
        num_threads=num_threads,
        capacity=capacity,
        shapes=shapes,
        dynamic_pad=dynamic_pad,
        allow_smaller_final_batch=allow_smaller_final_batch,
        keep_input=keep_input,
        shared_name=shared_name)

    # dequeued[0] is the batched sequence lengths; the rest are user tensors.
    return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
# Public API of this module; everything else is an implementation detail.
__all__ = [
    "bucket",
    "bucket_by_sequence_length"
]
| |
# -*- coding: utf-8 -*-
"""
@author: Jingnan Shi
@contact: jshi@g.hmc.edu
To-do: TABLE environment FLOAT Checkbutton
"""
from Tkinter import *
import tkMessageBox
from table import Table
from tkFileDialog import *
class App:
def __init__(self, master):
# initialize Table object
self.table = Table()
self.master = master
# title
title = Label(master,text= "pyTable",font=("Helvetica", 16))
self.title = title
# raw excel/csv table
t_raw_table = Text(master, width=40, height=24,
wrap=WORD)
self.text_raw = t_raw_table
# http://stackoverflow.com/questions/5870561/re-binding-select-all-in-text-widget
# Thank you, Bryan Oakley!
master.bind_class("Text","<Command-a>", self.selectall)
start_str = "Please delete this line and copy your excel table here"
self.text_raw.insert(INSERT, start_str)
# text box to display produced latex code
t_latex_table = Text(master,state = DISABLED, width=40, height=24,
wrap=WORD)
self.text_texed = t_latex_table
# options to select
# label
format_lab = Label(master,text = "Choose the format of your table: ")
self.format_lab = format_lab
# raw data format
format_list = Listbox(master,selectmode = SINGLE,exportselection = False, height=2)
self.formats = self.table.getFormats()
for i in range(len(self.formats)):
format_list.insert(i,self.formats[i])
self.format_list = format_list
# set focus
self.format_list.select_set(0)
self.format_list.event_generate("<<ListboxSelect>>")
# label
style_lab = Label(master,text = "Choose the LaTeX style you want: ")
self.style_lab = style_lab
# style: booktabs, longtable, simple, cells
#scrollbar = Scrollbar(master)
style_list = Listbox(master,exportselection = False, selectmode = SINGLE,height=4)
self.styles = self.table.getStyles()
for i in range(len(self.styles)):
style_list.insert(i, self.styles[i])
self.style_list = style_list
# set focus
self.style_list.select_set(0)
self.style_list.event_generate("<<ListboxSelect>>")
# math? checkbox
self.math_var = IntVar()
math = Checkbutton(master, variable = self.math_var, text = "math?")
self.math_check = math
# convert button
convert_button = Button(master, text="Convert", command=self.convert,
padx = 3, pady = 2)
self.convert_button = convert_button
# copy from clipboard
paste_from_clipboard = Button(master, text="Paste from Clipboard", command=self.pasteFromClipboard, padx = 2, pady = 2)
paste_from_clipboard.grid(row=16,column=1)
copy_to_clipboard = Button(master, text="Copy to Clipboard", command=self.copyToClipboard, padx = 2, pady = 2)
copy_to_clipboard.grid(row=17,column=1)
# positioning the tk widgets
self.title.grid(row=0,column=1)
self.text_raw.grid(row=1,column=0,rowspan=18)
self.text_texed.grid(row=1,column=2,rowspan=18)
self.format_lab.grid(row=1,column=1)
self.format_list.grid(row=2,column=1)
self.style_lab.grid(row=3,column=1)
self.style_list.grid(row=4,column=1)
self.math_check.grid(row=5,column=1)
self.convert_button.grid(row=15,column=1)
# menu
menubar = Menu(master)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label="Open", command=self.openFile)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=master.destroy)
menubar.add_cascade(label="File", menu=filemenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About...", command=self.showAbout)
menubar.add_cascade(label="Help", menu=helpmenu)
master.config(menu=menubar)
def selectall(self, event):
event.widget.tag_add("sel","1.0","end")
def openFile(self):
self.text_raw.delete(1.0,END)
f = askopenfile(mode='r')
# clear the unwanted \r
f_text = f.read().replace('\r','')
self.text_raw.insert(END,f_text)
f.close()
def pasteFromClipboard(self):
""" paste from clipboard
"""
self.text_raw.delete(1.0,END)
self.text_raw.insert(END, self.master.clipboard_get())
def copyToClipboard(self):
""" copy to clipboard
"""
self.master.clipboard_clear()
self.master.clipboard_append(self.text_texed.get("1.0", 'end-1c'))
def showAbout(self):
""" show about info
"""
tkMessageBox.showinfo("About", "Made by Jingnan Shi - jshi@g.hmc.edu")
def convert(self):
""" function called when convert is called
"""
print "converting ........."
# update the table
print "Source format: " + self.getSelectedFormat()
self.table.update(self.getRawString(), self.getSelectedFormat())
# get the latex table
print "Target style: " + self.getSelectedStyle()
latex_table = self.table.getLaTeX(self.getSelectedStyle(),self.math_var.get())
# update gui
self.updateTargetText(latex_table)
def getRawString(self):
""" get the raw table string in the app
"""
raw = self.text_raw.get("1.0", 'end-1c')
return raw
def updateTargetText(self, string):
""" update the text on the finished table text
"""
self.text_texed.config(state=NORMAL)
self.text_texed.delete(1.0, END)
self.text_texed.insert(END, string)
def getSelectedFormat(self):
""" return the GUI selected raw data format
"""
try:
# a bug in Tkinter 1.160 (Python 2.2) and earlier versions causes
# this list to be returned as a list of strings, instead of integers
selected_list = map(int, self.format_list.curselection())
format_index = selected_list[0]
raw_format = self.formats[format_index]
return raw_format
except IndexError:
print "Format not selected!"
return
def getSelectedStyle(self):
""" return the GUI selected latex style
"""
try:
# a bug in Tkinter 1.160 (Python 2.2) and earlier versions causes
# this list to be returned as a list of strings, instead of integers
selected_list = map(int, self.style_list.curselection())
style_index = selected_list[0]
style = self.styles[style_index]
return style
except IndexError:
print "Style not selected!"
return style
| |
'''
This file is part of LIO(tm).
Copyright (c) 2012-2014 by Datera, Inc.
More information on www.datera.io.
Original author: Jerome Martin <jxm@netiant.com>
Datera and LIO are trademarks of Datera, Inc., which may be registered in some
jurisdictions.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
import os, re, time, copy, logging
from rtslib.utils import is_valid_wwn, list_eth_ips, fread
from config_filters import *
from config_tree import ConfigTree, NO_VALUE
from config_parser import ConfigParser, PolicyParser, PatternParser
# Module-level logger; flip DEBUG to True for verbose tracing of config
# loading and validation.
DEBUG = False
if DEBUG:
    logging.basicConfig()
    log = logging.getLogger('Config')
    log.setLevel(logging.DEBUG)
else:
    log = logging.getLogger('Config')
    log.setLevel(logging.INFO)
# FIXME validate_* and _load_parse_tree are a mess !!!
# TODO Implement resync() to reload both policy and configfs state
# TODO Add class_match_ids (objs) and name_match_value (attrs) to search etc.
# Use it to simplify all "%s .*" tricks in cli
# TODO Implement commit_live()
# TODO Custom defaults load
# TODO Add copy() operation
def dump_value(string):
    '''
    Quotes a configuration value for dumping: values containing separator or
    special characters are wrapped in double quotes, values containing a
    double quote are wrapped in single quotes, the empty string dumps as a
    pair of double quotes, and the NO_VALUE sentinel passes through as-is.
    '''
    if string == NO_VALUE:
        return NO_VALUE
    if any(char in string for char in " ~\t{}#',;"):
        return '"%s"' % string
    if '"' in string:
        return "'%s'" % string
    if not string:
        return '""'
    return string
def key_to_string(key):
    '''
    Renders a configuration key tuple as a single space-separated string,
    quoting each item via dump_value().
    '''
    return " ".join(dump_value(item) for item in key)
def is_valid_backend(value, parent):
    '''
    Checks that a 'backend:disk' value refers to a storage disk declared at
    the root of the configuration tree that contains the parent node.
    '''
    root = parent
    while root.parent is not None:
        root = root.parent
    (backend, _, disk) = value.partition(':')
    return bool(root.search([("storage", backend), ("disk", disk)]))
def sort_key(node):
    '''
    A sort key for configuration nodes, that ensures nodes potentially
    referenced in the config come first: storage before fabric and lun
    objects before acl objects. Also, attributes will be sorted before
    objects, so that configuration dumps are easier to read, with simple
    attributes coming before attribute groups.
    '''
    node_type = node.data['type']
    # Rank object classes by their declaration order in the parser grammar.
    rank_by_class = {}
    for index, class_name in enumerate(ConfigParser.obj_classes.split()):
        rank_by_class[class_name] = "%s%s" % (index, class_name)
    if node_type == 'attr':
        return ('0', node.key[0], node.key[1])
    if node_type == 'group':
        return ('1', node.key[0])
    if node_type == 'obj':
        return ('2', rank_by_class.get(node.key[0], node.key[0]), node.key[1])
    raise ConfigError("Unknown configuration node type %s for %s"
                      % (node_type, node))
class ConfigError(Exception):
    '''Raised when configuration loading or validation fails.'''
    pass
class Config(object):
'''
The LIO configuration API.
The Config object provide methods to edit, search, validate and update the
current configuration, and commit that configuration to the live system on
request.
It features pattern-matching search for all configuration objects and
attributes as well as multi-level undo capabilities. In addition, all
configuration changes are staged before being applied, isolating the
current configuration from load-time and validation errors.
'''
policy_dir = "/var/target/policy"
    def __init__(self):
        '''
        Creates empty policy, reference and configuration trees, one parser
        per syntax, and loads all system policy files from policy_dir.
        '''
        # Root-node metadata shared by the freshly created trees.
        data = {'source': {'operation': 'init', 'timestamp': time.time()},
                'type': 'root',
                'policy_path': []}
        self.policy = ConfigTree(data, sort_key, key_to_string)
        self.reference = ConfigTree(data, sort_key, key_to_string)
        self._parser = ConfigParser()
        self._policy_parser = PolicyParser()
        self._pattern_parser = PatternParser()
        # Stack of configuration snapshots (undo history); presumably the
        # most recent one is exposed as self.current elsewhere -- not
        # visible in this part of the file.
        self._configs = [ConfigTree(data, sort_key, key_to_string)]
        self._load_policy()
def _load_policy(self):
'''
Loads all LIO system policy files.
'''
filepaths = ["%s/%s" % (self.policy_dir, path)
for path in os.listdir(self.policy_dir)
if path.endswith(".lio")]
for filepath in filepaths:
log.debug('Loading policy file %s' % filepath)
parse_tree = self._policy_parser.parse_file(filepath)
source = {'operation': 'load',
'filepath': filepath,
'timestamp': time.time(),
'mtime': os.path.getmtime(filepath)}
self._load_parse_tree(parse_tree, replace=False,
source=source, target='policy')
    def _load_parse_tree(self, parse_tree, cur_stage=None,
                         replace=False, source=None,
                         target='config', allow_new_attrs=False):
        '''
        Loads a parse tree into a staged copy of one of the configuration
        trees, then commits the stage on success.

        target can be 'config', 'policy' or 'reference'

        Returns the list of tree nodes added by this load.
        '''
        # TODO accept 'defaults' target too
        if source is None:
            source = {}
        if cur_stage is None:
            # Top-level call: work on a staged clone of the target tree so
            # that parse/validation errors cannot corrupt current state; the
            # stage is committed back at the end of this call.
            update_target = True
            if replace:
                data = {'source': source, 'policy_path': [], 'type': 'root'}
                stage = ConfigTree(data, sort_key, key_to_string)
            elif target == 'config':
                stage = self.current.get_clone()
                stage.data['source'] = source
            elif target == 'policy':
                stage = self.policy.get_clone()
                stage.data['source'] = source
            elif target == 'reference':
                stage = self.reference.get_clone()
                stage.data['source'] = source
        else:
            # Recursive call for a block statement: keep loading into the
            # subtree we were handed and let the top-level call commit.
            update_target = False
            stage = cur_stage
        loaded = []
        log.debug("Loading parse tree %s" % parse_tree)
        for statement in parse_tree:
            # Each statement is a path of tokens walked from the stage root.
            cur = stage
            log.debug("Visiting statement %s" % statement)
            for token in statement:
                token['source'] = source
                log.debug("Visiting token %s" % token)
                if token['type'] == 'obj':
                    log.debug("Loading obj token: %s" % token)
                    if target != 'policy':
                        token = self.validate_obj(token, cur)
                    old = cur.get(token['key'])
                    # cine = "create if not exists"
                    cur = cur.cine(token['key'], token)
                    if not old:
                        loaded.append(cur)
                    if target != 'policy':
                        # New objects get all policy-defined attributes filled
                        # in with their defaults.
                        self._add_missing_attributes(cur)
                    log.debug("Added object %s" % cur.path)
                elif token['type'] == 'attr':
                    log.debug("Loading attr token: %s" % token)
                    if target != 'policy':
                        token = self.validate_attr(token, cur, allow_new_attrs)
                    # An attribute replaces any previous value for the same
                    # attribute name, whatever that value was.
                    old_nodes = cur.search([(token['key'][0], ".*")])
                    for old_node in old_nodes:
                        log.debug("Deleting old value: %s\nnew is: %s"
                                  % (old_node.path, str(token['key'])))
                        deleted = cur.delete([old_node.key])
                        log.debug("Deleted: %s" % str(deleted))
                    cur = cur.cine(token['key'], token)
                    if old_nodes and old_nodes[0].key != cur.key:
                        loaded.append(cur)
                    log.debug("Added attribute %s" % cur.path)
                elif token['type'] == 'group':
                    log.debug("Loading group token: %s" % token)
                    if target != 'policy':
                        log.debug("cur '%s' token '%s'" % (cur, token))
                        token['policy_path'] = (cur.data['policy_path']
                                                + [(token['key'][0],)])
                    old = cur.get(token['key'])
                    cur = cur.cine(token['key'], token)
                    if not old:
                        loaded.append(cur)
                elif token['type'] == 'block':
                    log.debug("Loading block token: %s" % token)
                    # A block fans out into one recursive load per enclosed
                    # statement, all rooted at the current node.
                    for statement in token['statements']:
                        log.debug("_load_parse_tree recursion on block "
                                  "statement: %s" % [statement])
                        loaded.extend(self._load_parse_tree(
                            [statement], cur, source=source,
                            target=target, allow_new_attrs=allow_new_attrs))
        if update_target:
            # Commit the stage back to the tree we cloned it from.
            if target == 'config':
                self.current = stage
            elif target == 'policy':
                self.policy = stage
            elif target == 'reference':
                self.reference = stage
        return loaded
    def _add_missing_attributes(self, obj):
        '''
        Given an obj node, add all missing attributes and attribute groups in
        the configuration.

        Defaults come from the policy tree: an attribute with no policy
        default is created with a None value and flagged as required.
        '''
        source = {'operation': 'auto', 'timestamp': time.time()}
        policy_root = self.policy.get_path(obj.data['policy_path'])
        # Pass 1: direct attributes of the object itself.
        for policy_node in [node for node in policy_root.nodes
                            if node.data['type'] == 'attr']:
            attr = obj.search([(policy_node.key[0], ".*")])
            if not attr:
                key = (policy_node.key[0], policy_node.data.get('val_dfl'))
                data = {'key': key, 'type': 'attr', 'source': source,
                        'val_dfl': policy_node.data.get('val_dfl'),
                        'val_type': policy_node.data['val_type'],
                        'required': key[1] is None,
                        'policy_path': policy_node.path}
                log.debug("obj.set(%s, %s)" % (str(key), data))
                obj.set(key, data)
        # Pass 2: make sure every policy-defined attribute group exists.
        groups = []
        for policy_node in [node for node in policy_root.nodes
                            if node.data['type'] == 'group']:
            group = obj.get((policy_node.key[0],))
            if not group:
                key = (policy_node.key[0],)
                data = {'key': key, 'type': 'group', 'source': source,
                        'policy_path': policy_node.path}
                groups.append(obj.set(key, data))
            else:
                groups.append(group)
        # Pass 3: fill in missing attributes inside each group.
        for group in groups:
            policy_root = self.policy.get_path(group.data['policy_path'])
            for policy_node in [node for node in policy_root.nodes
                                if node.data['type'] == 'attr']:
                attr = group.search([(policy_node.key[0], ".*")])
                if not attr:
                    key = (policy_node.key[0], policy_node.data.get('val_dfl'))
                    data = {'key': key, 'type': 'attr', 'source': source,
                            'val_dfl': policy_node.data.get('val_dfl'),
                            'val_type': policy_node.data['val_type'],
                            'required': key[1] is None,
                            'policy_path': policy_node.path}
                    group.set(key, data)
def validate_val(self, value, val_type, parent=None):
valid_value = None
log.debug("validate_val(%s, %s)" % (value, val_type))
if value == NO_VALUE:
return None
if val_type == 'bool':
if value.lower() in ['yes', 'true', '1', 'enable']:
valid_value = 'yes'
elif value.lower() in ['no', 'false', '0', 'disable']:
valid_value = 'no'
elif val_type == 'bytes':
match = re.match(r'(\d+(\.\d*)?)([kKMGT]?B?$)', value)
if match:
qty = str(float(match.group(1)))
unit = match.group(3).upper()
if not unit.endswith('B'):
unit += 'B'
valid_value = "%s%s" % (qty, unit)
elif val_type == 'int':
try:
valid_value = str(int(value))
except:
pass
elif val_type == 'ipport':
(addr, _, port) = value.rpartition(":")
try:
str(int(port))
except:
pass
else:
try:
listen_all = int(addr.replace(".", "")) == 0
except:
listen_all = False
if listen_all:
valid_value = "0.0.0.0:%s" % port
elif addr in list_eth_ips():
valid_value = value
elif val_type == 'posint':
try:
val = int(value)
except:
pass
else:
if val > 0:
valid_value = value
elif val_type == 'str':
valid_value = str(value)
forbidden = "*?[]"
for char in forbidden:
if char in valid_value:
valid_value = None
break
elif val_type == 'erl':
if value in ["0", "1", "2"]:
valid_value = value
elif val_type == 'iqn':
if is_valid_wwn('iqn', value):
valid_value = value
elif val_type == 'naa':
if is_valid_wwn('naa', value):
valid_value = value
elif val_type == 'hw_wwn':
valid_value = value
elif val_type == 'backend':
if is_valid_backend(value, parent):
valid_value = value
else:
raise ConfigError("Unknown value type '%s' when validating %s"
% (val_type, value))
log.debug("validate_val(%s) is a valid %s: %s"
% (value, val_type, valid_value))
return valid_value
def validate_obj(self, token, parent):
log.debug("validate_obj(%s, %s)" % (token, parent.data))
policy_search = parent.data['policy_path'] + [(token['key'][0], ".*")]
policy_nodes = self.policy.search(policy_search)
valid_token = copy.deepcopy(token)
expected_val_types = set()
for policy_node in policy_nodes:
id_fixed = policy_node.data['id_fixed']
id_type = policy_node.data['id_type']
if id_fixed is not None:
expected_val_types.add("'%s'" % id_fixed)
if id_fixed == token['key'][1]:
valid_token['policy_path'] = policy_node.path
return valid_token
else:
expected_val_types.add(id_type)
valid_value = self.validate_val(valid_token['key'][1], id_type)
if valid_value is not None:
valid_token['key'] = (valid_token['key'][0], valid_value)
valid_token['policy_path'] = policy_node.path
return valid_token
if not policy_nodes:
obj_type = ("%s %s" % (parent.path_str, token['key'][0])).strip()
raise ConfigError("Unknown object type: %s" % obj_type)
else:
raise ConfigError("Invalid %s identifier '%s': expected type %s"
% (token['key'][0],
token['key'][1],
", ".join(expected_val_types)))
def validate_attr(self, token, parent, allow_new_attr=False):
log.debug("validate_attr(%s, %s)" % (token, parent.data))
if token['key'][1] is None:
return token
policy_search = parent.data['policy_path'] + [(token['key'][0], ".*")]
policy_nodes = self.policy.search(policy_search)
valid_token = copy.deepcopy(token)
expected_val_types = set()
for policy_node in policy_nodes:
ref_path = policy_node.data['ref_path']
valid_token['required'] = policy_node.data['required']
valid_token['comment'] = policy_node.data['comment']
valid_token['val_dfl'] = policy_node.data.get('val_dfl')
valid_token['val_type'] = policy_node.data['val_type']
if ref_path is not None:
root = parent
if ref_path.startswith('-'):
(upno, _, down) = ref_path[1:].partition(' ')
for i in range(int(upno) - 1):
root = root.parent
else:
while not root.is_root:
root = root.parent
search_path = [(down, token['key'][1])]
nodes = root.search(search_path)
if len(nodes) == 1:
valid_token['ref_path'] = nodes[0].path_str
return valid_token
elif len(nodes) == 0:
raise ConfigError("Invalid reference for attribute %s: %s"
% (token['key'][0], search_path))
else:
raise ConfigError("Unexpected reference error, got: %s"
% nodes)
return valid_token
else:
expected_val_types.add(policy_node.data['val_type'])
if valid_token['key'][1] == NO_VALUE:
valid_value = NO_VALUE
else:
valid_value = \
self.validate_val(valid_token['key'][1],
policy_node.data['val_type'],
parent=parent)
if valid_value is not None:
valid_token['key'] = (valid_token['key'][0], valid_value)
return valid_token
if not policy_nodes:
if allow_new_attr:
valid_token['required'] = False
valid_token['comment'] = "Unknown"
valid_token['val_dfl'] = valid_token['key'][1]
valid_token['val_type'] = "raw"
valid_token['ref_path'] = None
return valid_token
else:
attr_name = ("%s %s"
% (parent.path_str, token['key'][0])).strip()
raise ConfigError("Unknown attribute: %s" % attr_name)
else:
raise ConfigError("Invalid %s value '%s': expected type %s"
% (token['key'][0],
token['key'][1],
", ".join(expected_val_types)))
    @property
    def current(self):
        '''The current (most recent) configuration tree.'''
        return self._configs[-1]
    @current.setter
    def current(self, config_tree):
        # Setting pushes a new tree onto the history stack, so that
        # undo() can later pop back to the previous state.
        self._configs.append(config_tree)
def undo(self):
'''
Restores the previous state of the configuration, before the last set,
load, delete, update or clear operation. If there is nothing to undo, a
ConfigError exception will be raised.
'''
if len(self._configs) < 2:
raise ConfigError("Nothing to undo")
else:
self._configs.pop()
def set(self, configuration):
'''
Evaluates the configuration (a string in LIO configuration format) and
sets the relevant objects, attributes and atttribute groups.
Existing attributes and objects will be updated if needed and new ones
will be added.
The list of created configuration nodes will be returned.
If an error occurs, the operation will be aborted, leaving the current
configuration intact.
'''
parse_tree = self._parser.parse_string(configuration)
source = {'operation': 'set',
'data': configuration,
'timestamp': time.time()}
return self._load_parse_tree(parse_tree, source=source)
def delete(self, pattern, node_filter=lambda x:x):
'''
Deletes all configuration objects and attributes whose paths match the
pattern, along with their children.
The pattern is a single LIO configuration statement without any block,
where object identifiers, attributes names, attribute values and
attribute groups are regular expressions patterns. Object types have to
use their exact string representation to match.
node_filter is a function applied to each node before returning it:
node_filter(node_in) -> node_out | None (aka filtered out)
Returns a list of all deleted nodes.
If an error occurs, the operation will be aborted, leaving the current
configuration intact.
'''
path = [token for token in
self._pattern_parser.parse_string(pattern)]
log.debug("delete(%s)" % pattern)
source = {'operation': 'delete',
'pattern': pattern,
'timestamp': time.time()}
stage = self.current.get_clone()
stage.data['source'] = source
deleted = []
for node in stage.search(path, node_filter):
log.debug("delete() found node %s" % node)
deleted.append(stage.delete(node.path))
self.current = stage
return deleted
def load(self, filepath, allow_new_attrs=False):
'''
Loads an LIO configuration file and replace the current configuration
with it.
All existing objects and attributes will be deleted, and new ones will
be added.
If an error occurs, the operation will be aborted, leaving the current
configuration intact.
'''
for c in fread(filepath):
if c not in ["\n", "\t", " "]:
parse_tree = self._parser.parse_file(filepath)
source = {'operation': 'load',
'filepath': filepath,
'timestamp': time.time(),
'mtime': os.path.getmtime(filepath)}
self._load_parse_tree(parse_tree, replace=True,
source=source, allow_new_attrs=allow_new_attrs)
break
def load_live(self):
'''
Loads the live-running configuration.
'''
from config_live import dump_live
live = dump_live()
parse_tree = self._parser.parse_string(live)
source = {'operation': 'resync',
'timestamp': time.time()}
self._load_parse_tree(parse_tree, replace=True,
source=source, allow_new_attrs=True)
def update(self, filepath):
'''
Updates the current configuration with the contents of an LIO
configuration file.
Existing attributes and objects will be updated if needed and new ones
will be added.
If an error occurs, the operation will be aborted, leaving the current
configuration intact.
'''
parse_tree = self._parser.parse_file(filepath)
source = {'operation': 'update',
'filepath': filepath,
'timestamp': time.time(),
'mtime': os.path.getmtime(filepath)}
self._load_parse_tree(parse_tree, source=source)
def clear(self):
'''
Clears the current configuration.
This removes all current objects and attributes from the configuration.
'''
source = {'operation': 'clear',
'timestamp': time.time()}
self.current = ConfigTree({'source': source}, sort_key, key_to_string)
def search(self, search_statement, node_filter=lambda x:x):
'''
Returns a list of nodes matching the search_statement, relative to the
current node, or an empty list if no match was found.
The search_statement is a single LIO configuration statement without
any block, where object identifiers, attributes names, attribute values
and attribute groups are regular expressions patterns. Object types
have to use their exact string representation to match.
node_filter is a function applied to each node before returning it:
node_filter(node_in) -> node_out | None (aka filtered out)
'''
path = [token for token in
self._pattern_parser.parse_string(search_statement)]
return self.current.search(path, node_filter)
    def dump(self, search_statement=None, node_filter=lambda x:x):
        '''
        Returns a LIO configuration file format dump of the nodes matching
        the search_statement, or of all nodes if search_statement is None.
        The search_statement is a single LIO configuration statement without
        any block, where object identifiers, attributes names, attribute values
        and attribute groups are regular expressions patterns. Object types
        have to use their exact string representation to match.
        node_filter is a function applied to each node before dumping it:
        node_filter(node_in) -> node_out | None (aka filtered out)

        Returns None when nothing non-whitespace was produced.
        '''
        # FIXME: Breaks with filter_only_missing
        if not search_statement:
            root_nodes = [self.current]
        else:
            root_nodes = self.search(search_statement, node_filter)
        if root_nodes:
            parts = []
            for root_node_in in root_nodes:
                root_node = node_filter(root_node_in)
                if root_node is None:
                    # NOTE(review): 'break' abandons the remaining
                    # root_nodes when one is filtered out -- confirm
                    # 'continue' was not intended here.
                    break
                dump = ''
                if root_node.key_str:
                    dump = "%s " % root_node.key_str
                nodes = root_node.nodes
                if root_node.is_root or len(nodes) == 1:
                    # Single child (or top level): dump inline, no block.
                    for node in nodes:
                        section = self.dump(node.path_str, node_filter)
                        if section:
                            dump += section
                elif len(nodes) > 1:
                    # Several children: wrap them in an indented block.
                    dump += "{\n"
                    for node in nodes:
                        section = self.dump(node.path_str, node_filter)
                        if section is not None:
                            lines = section.splitlines()
                        else:
                            lines = []
                        dump += "\n".join("    %s" % line
                                          for line in lines if line)
                        dump += "\n"
                    dump += "}\n"
                parts.append(dump)
            dump = "\n".join(parts)
            if dump.strip():
                return dump
def save(self, filepath, pattern=None):
'''
Saves the current configuration to filepath, using LIO configuration
file format. If path is not None, only objects and attributes starting
at path and hanging under it will be saved.
For convenience, the saved configuration will also be returned as a
string.
The pattern is a whitespace-separated string of regular expressions,
each of which will be matched against configuration objects and
attributes. In case of dump, the pattern must be non-ambiguous and
match only a single configuration node.
If the pattern matches either zero or more than one configuration
nodes, a ConfigError exception will be raised.
'''
dump = self.dump(pattern, filter_no_missing)
if dump is None:
dump = ''
with open(filepath, 'w') as f:
f.write(dump)
return dump
def verify(self):
'''
Validates the configuration for the following points:
- Portal IP Addresses exist
- Devices and file paths exist
- Files for fileio exist
- No required attributes are missing
- References are correct
Returns a dictionary of validation_test: [errors]
'''
return {}
    def apply(self, brute_force=True):
        '''
        Applies the configuration to the live system:
        - Remove objects absent from the configuration and objects in the
        configuration with different required attributes
        - Create new storage objects
        - Create new fabric objects
        - Update relevant storage objects
        - Update relevant fabric objects

        This is a generator: each step yields a human-readable progress
        message before it is executed, so nothing happens until the
        caller iterates it.
        '''
        from config_live import apply_create_obj, apply_delete_obj
        if brute_force:
            from config_live import apply_create_obj, clear_configfs
            # Brute-force mode: tear down every live object, then
            # recreate all objects in configuration walk order.
            yield "[clear] delete all live objects"
            clear_configfs()
            for obj in self.current.walk(get_filter_on_type(['obj'])):
                yield("[create] %s" % obj.path_str)
                apply_create_obj(obj)
        else:
            # TODO for minor_obj, update instead of create/delete
            diff = self.diff_live()
            delete_list = diff['removed'] + diff['major_obj'] + diff['minor_obj']
            # Reverse so children are deleted before their parents.
            delete_list.reverse()
            for obj in delete_list:
                yield "[delete] %s" % obj.path_str
                apply_delete_obj(obj)
            for obj in diff['created'] + diff['major_obj'] + diff['minor_obj']:
                yield "[create] %s" % obj.path_str
                apply_create_obj(obj)
def diff_live(self):
'''
Returns a diff between the current configuration and the live
configuration as a reference.
'''
from config_live import dump_live
parse_tree = self._parser.parse_string(dump_live())
source = {'operation': 'load',
'timestamp': time.time()}
self._load_parse_tree(parse_tree, replace=True,
source=source, target='reference',
allow_new_attrs=True)
return self.diff()
    def diff(self):
        '''
        Computes differences between a valid current configuration and a
        previously loaded valid reference configuration.
        Returns a dict of:
        - 'removed': list of removed objects
        - 'major': list of changed required attributes
        - 'major_obj': list of obj with major changes
        - 'minor': list of changed non-required attributes
        - 'minor_obj': list of obj with minor changes
        - 'created': list of new objects in the current configuration
        '''
        # FIXME data['required'] check should be enough without NO_VALUE check
        # FIXME Can't we just pass the reference config instead of having to preload it?
        diffs = {}
        keys = ('removed', 'major', 'major_obj',
                'minor', 'minor_obj', 'created')
        for key in keys:
            diffs[key] = []
        # Objects present here but absent from the reference are new.
        for obj in self.current.walk(get_filter_on_type(['obj'])):
            if not self.reference.get_path(obj.path):
                diffs['created'].append(obj)
        # Objects present in the reference but absent here were removed.
        for obj in self.reference.walk(get_filter_on_type(['obj'])):
            if not self.current.get_path(obj.path):
                diffs['removed'].append(obj)
        # Required (or valueless) attributes missing from the reference
        # count as major changes of their parent object.
        for obj in self.current.walk(get_filter_on_type(['obj'])):
            if self.reference.get_path(obj.path):
                for node in obj.nodes:
                    if node.data['type'] == 'attr' \
                       and (node.data['required'] \
                            or node.key[1] == NO_VALUE):
                        if not self.reference.get_path(node.path):
                            diffs['major'].append(node)
                            diffs['major_obj'].append(node.parent)
        # Non-required attributes missing from the reference count as
        # minor changes; attribute groups are checked one level deeper.
        for obj in self.current.walk(get_filter_on_type(['obj'])):
            if self.reference.get_path(obj.path):
                for node in obj.nodes:
                    if node.data['type'] == 'attr' \
                       and not node.data['required'] \
                       and node.key[1] != NO_VALUE:
                        if not self.reference.get_path(node.path):
                            diffs['minor'].append(node)
                            if node.parent not in diffs['minor_obj'] \
                               and node.parent not in diffs['major_obj']:
                                diffs['minor_obj'].append(node.parent)
                    elif node.data['type'] == 'group':
                        for attr in node.nodes:
                            if attr.data['type'] == 'attr' \
                               and not attr.data['required'] \
                               and attr.key[1] != NO_VALUE:
                                if not self.reference.get_path(attr.path):
                                    diffs['minor'].append(attr)
                                    # NOTE(review): this records the group's
                                    # parent (node.parent) rather than the
                                    # attribute's object -- confirm intended.
                                    if node.parent not in diffs['minor_obj'] \
                                       and node.parent not in diffs['major_obj']:
                                        diffs['minor_obj'].append(node.parent)
        return diffs
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import asyncio
from struct import unpack
from _shaded_thriftpy.protocol.exc import TProtocolException
from _shaded_thriftpy.thrift import TException, TType
from _shaded_thriftpy.protocol.compact import (
from_zig_zag,
CompactType,
TCompactProtocol,
)
from .base import TAsyncProtocolBase
# String and binary are interchangeable on the wire when matching specs.
BIN_TYPES = (TType.STRING, TType.BINARY)
@asyncio.coroutine
def read_varint(trans):
    """Read an unsigned varint (LEB128-style) from the async transport.

    The low 7 bits of each byte are accumulated least-significant-group
    first; a byte with a clear high bit terminates the value.
    """
    result = 0
    shift = 0
    while True:
        x = yield from trans.read(1)
        byte = ord(x)
        result |= (byte & 0x7f) << shift
        if byte >> 7 == 0:
            # High bit clear: this was the final byte of the varint.
            return result
        shift += 7
class TAsyncCompactProtocol(TCompactProtocol, # Inherit all of the writing
                            TAsyncProtocolBase):
    """Compact implementation of the Thrift protocol driver.

    The write path is inherited unchanged from TCompactProtocol; the
    read path is overridden with generator-based asyncio coroutines
    (pre-``async def`` style) that await bytes from ``self.trans``.
    """
    PROTOCOL_ID = 0x82  # first byte of every compact-protocol message
    VERSION = 1
    VERSION_MASK = 0x1f  # low 5 bits of the version/type byte
    TYPE_MASK = 0xe0  # high 3 bits of the version/type byte
    TYPE_BITS = 0x07
    TYPE_SHIFT_AMOUNT = 5
    @asyncio.coroutine
    def _read_size(self):
        """Read a varint-encoded size and reject negative values."""
        result = yield from read_varint(self.trans)
        if result < 0:
            raise TException("Length < 0")
        return result
    @asyncio.coroutine
    def read_message_begin(self):
        """Read and validate a message header; return (name, type, seqid)."""
        proto_id = yield from self._read_ubyte()
        if proto_id != self.PROTOCOL_ID:
            raise TProtocolException(TProtocolException.BAD_VERSION,
                                     'Bad protocol id in the message: %d'
                                     % proto_id)
        ver_type = yield from self._read_ubyte()
        # One byte packs the message type (high bits) and version (low bits).
        type = (ver_type >> self.TYPE_SHIFT_AMOUNT) & self.TYPE_BITS
        version = ver_type & self.VERSION_MASK
        if version != self.VERSION:
            raise TProtocolException(TProtocolException.BAD_VERSION,
                                     'Bad version: %d (expect %d)'
                                     % (version, self.VERSION))
        seqid = yield from read_varint(self.trans)
        name = yield from self._read_string()
        return name, type, seqid
    @asyncio.coroutine
    def read_message_end(self): # TAsyncClient expects coroutine
        # Every struct frame opened during the message must be closed.
        assert len(self._structs) == 0
    @asyncio.coroutine
    def _read_field_begin(self):
        """Read a field header; return (None, field ttype, field id)."""
        type = yield from self._read_ubyte()
        if type & 0x0f == TType.STOP:
            return None, 0, 0
        delta = type >> 4
        if delta == 0:
            # No delta in the header: the id follows as a zigzag varint.
            fid = from_zig_zag((yield from read_varint(self.trans)))
        else:
            # Field id is delta-encoded relative to the previous field.
            fid = self._last_fid + delta
        self._last_fid = fid
        type = type & 0x0f
        if type == CompactType.TRUE:
            # Booleans are folded into the field header; remember the
            # value for the upcoming _read_bool() call.
            self._bool_value = True
        elif type == CompactType.FALSE:
            self._bool_value = False
        return None, self._get_ttype(type), fid
    def _read_field_end(self):
        pass
    def _read_struct_begin(self):
        # Save the enclosing struct's last field id for delta decoding.
        self._structs.append(self._last_fid)
        self._last_fid = 0
    def _read_struct_end(self):
        # Restore the enclosing struct's field-id context.
        self._last_fid = self._structs.pop()
    @asyncio.coroutine
    def _read_map_begin(self):
        """Read a map header; return (key ttype, value ttype, size)."""
        size = yield from self._read_size()
        types = 0
        if size > 0:
            # One byte packs key type (high nibble) and value type (low).
            types = yield from self._read_ubyte()
        vtype = self._get_ttype(types)
        ktype = self._get_ttype(types >> 4)
        return ktype, vtype, size
    @asyncio.coroutine
    def _read_collection_begin(self):
        """Read a list/set header; return (element ttype, size)."""
        size_type = yield from self._read_ubyte()
        size = size_type >> 4
        type = self._get_ttype(size_type)
        if size == 15:
            # Sizes >= 15 do not fit the nibble; a varint size follows.
            size = yield from self._read_size()
        return type, size
    def _read_collection_end(self):
        pass
    @asyncio.coroutine
    def _read_byte(self):
        """Read one signed byte."""
        result, = unpack('!b', (yield from self.trans.read(1)))
        return result
    @asyncio.coroutine
    def _read_ubyte(self):
        """Read one unsigned byte."""
        result, = unpack('!B', (yield from self.trans.read(1)))
        return result
    @asyncio.coroutine
    def _read_int(self):
        """Read a zigzag varint-encoded integer."""
        return from_zig_zag((yield from read_varint(self.trans)))
    @asyncio.coroutine
    def _read_double(self):
        """Read a little-endian 8-byte IEEE double."""
        buff = yield from self.trans.read(8)
        val, = unpack('<d', buff)
        return val
    @asyncio.coroutine
    def _read_binary(self):
        """Read a length-prefixed binary blob as raw bytes."""
        length = yield from self._read_size()
        return (yield from self.trans.read(length))
    @asyncio.coroutine
    def _read_string(self):
        """Read a length-prefixed string, UTF-8 decoded if configured."""
        len = yield from self._read_size()
        byte_payload = yield from self.trans.read(len)
        if self.decode_response:
            try:
                byte_payload = byte_payload.decode('utf-8')
            except UnicodeDecodeError:
                # Not valid UTF-8: hand back the raw bytes unchanged.
                pass
        return byte_payload
    @asyncio.coroutine
    def _read_bool(self):
        # The value may already have been decoded from the field header
        # by _read_field_begin(); consume it if so.
        if self._bool_value is not None:
            result = self._bool_value
            self._bool_value = None
            return result
        return (yield from self._read_byte()) == CompactType.TRUE
    @asyncio.coroutine
    def read_struct(self, obj):
        """Populate obj's fields from the wire per obj.thrift_spec."""
        self._read_struct_begin()
        while True:
            fname, ftype, fid = yield from self._read_field_begin()
            if ftype == TType.STOP:
                break
            if fid not in obj.thrift_spec:
                # Unknown field id: consume and discard its value.
                yield from self.skip(ftype)
                continue
            try:
                field = obj.thrift_spec[fid]
            except IndexError:
                yield from self.skip(ftype)
                raise
            else:
                # Accept the value if its wire type matches the spec
                # (string/binary are treated as interchangeable).
                if field is not None and \
                        (ftype == field[0]
                         or (ftype in BIN_TYPES
                             and field[0] in BIN_TYPES)):
                    fname = field[1]
                    fspec = field[2]
                    val = yield from self._read_val(field[0], fspec)
                    setattr(obj, fname, val)
                else:
                    # Type mismatch: skip the wire value.
                    yield from self.skip(ftype)
            self._read_field_end()
        self._read_struct_end()
    @asyncio.coroutine
    def _read_val(self, ttype, spec=None):
        """Read one value of ttype; spec describes any nested types."""
        if ttype == TType.BOOL:
            return (yield from self._read_bool())
        elif ttype == TType.BYTE:
            return (yield from self._read_byte())
        elif ttype in (TType.I16, TType.I32, TType.I64):
            return (yield from self._read_int())
        elif ttype == TType.DOUBLE:
            return (yield from self._read_double())
        elif ttype == TType.BINARY:
            return (yield from self._read_binary())
        elif ttype == TType.STRING:
            return (yield from self._read_string())
        elif ttype in (TType.LIST, TType.SET):
            # spec is either the element ttype or (ttype, nested spec).
            if isinstance(spec, tuple):
                v_type, v_spec = spec[0], spec[1]
            else:
                v_type, v_spec = spec, None
            result = []
            r_type, sz = yield from self._read_collection_begin()
            for i in range(sz):
                result.append((yield from self._read_val(v_type, v_spec)))
            self._read_collection_end()
            return result
        elif ttype == TType.MAP:
            # spec is (key spec, value spec); each side is a bare ttype
            # int or a (ttype, nested spec) pair.
            if isinstance(spec[0], int):
                k_type = spec[0]
                k_spec = None
            else:
                k_type, k_spec = spec[0]
            if isinstance(spec[1], int):
                v_type = spec[1]
                v_spec = None
            else:
                v_type, v_spec = spec[1]
            result = {}
            sk_type, sv_type, sz = yield from self._read_map_begin()
            if sk_type != k_type or sv_type != v_type:
                # Wire types disagree with the spec: discard the map.
                for _ in range(sz):
                    yield from self.skip(sk_type)
                    yield from self.skip(sv_type)
                self._read_collection_end()
                return {}
            for i in range(sz):
                k_val = yield from self._read_val(k_type, k_spec)
                v_val = yield from self._read_val(v_type, v_spec)
                result[k_val] = v_val
            self._read_collection_end()
            return result
        elif ttype == TType.STRUCT:
            # spec is the struct class; instantiate and fill it.
            obj = spec()
            yield from self.read_struct(obj)
            return obj
    @asyncio.coroutine
    def skip(self, ttype):
        """Consume and discard one value of the given ttype."""
        if ttype == TType.STOP:
            return
        elif ttype == TType.BOOL:
            yield from self._read_bool()
        elif ttype == TType.BYTE:
            yield from self._read_byte()
        elif ttype in (TType.I16, TType.I32, TType.I64):
            from_zig_zag((yield from read_varint(self.trans)))
        elif ttype == TType.DOUBLE:
            yield from self._read_double()
        elif ttype == TType.BINARY:
            yield from self._read_binary()
        elif ttype == TType.STRING:
            yield from self._read_string()
        elif ttype == TType.STRUCT:
            # Recursively skip every field of the nested struct.
            self._read_struct_begin()
            while True:
                name, ttype, id = yield from self._read_field_begin()
                if ttype == TType.STOP:
                    break
                yield from self.skip(ttype)
                self._read_field_end()
            self._read_struct_end()
        elif ttype == TType.MAP:
            ktype, vtype, size = yield from self._read_map_begin()
            for i in range(size):
                yield from self.skip(ktype)
                yield from self.skip(vtype)
            self._read_collection_end()
        elif ttype == TType.SET:
            etype, size = yield from self._read_collection_begin()
            for i in range(size):
                yield from self.skip(etype)
            self._read_collection_end()
        elif ttype == TType.LIST:
            etype, size = yield from self._read_collection_begin()
            for i in range(size):
                yield from self.skip(etype)
            self._read_collection_end()
class TAsyncCompactProtocolFactory(object):
    """Factory that builds TAsyncCompactProtocol instances for transports."""
    def __init__(self, decode_response=True):
        # When True, string fields are UTF-8 decoded on read.
        self.decode_response = decode_response
    def get_protocol(self, trans):
        """Wrap trans in a new TAsyncCompactProtocol."""
        return TAsyncCompactProtocol(trans,
                                     decode_response=self.decode_response)
| |
#
# IIS_DataListener.py -- IIS (XImtool protocol) server
#
# Eric Jeschke (eric@naoj.org)
#
# This file contains code by "fpierfed" (email addr unknown) downloaded from:
# http://pyimtool.cvs.sourceforge.net/viewvc/pyimtool/pyimtool/src/
# and modified.
#
# Modifications Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, os
import socket, select
import threading
import logging
import time
import struct
import array
import re
import string
from ginga.misc import Bunch
import ginga.util.six as six
if six.PY2:
import SocketServer
else:
import socketserver as SocketServer
# internal globals -- IIS/XImtool protocol subunits, flags and sizes
MEMORY = 0o1  # frame buffer i/o
LUT = 0o2  # lut i/o
FEEDBACK = 0o5  # used for frame clears
IMCURSOR = 0o20  # logical image cursor
WCS = 0o21  # used to set WCS
IIS_VERSION = 10  # version 1.0
PACKED = 0o040000  # tct flag: packet data is packed (see header table below)
COMMAND = 0o100000  # subunit flag: command packet
IIS_READ = 0o100000  # tid flag: read (vs write) request
IMC_SAMPLE = 0o040000  # cursor sample-mode flag -- TODO confirm usage
IMT_FBCONFIG = 0o77  # mask for the frame buffer config field -- TODO confirm
XYMASK = 0o77777  # mask for x/y coordinate fields -- TODO confirm
MAX_FBCONFIG = 128  # max possible frame buf sizes
MAX_FRAMES = 15  # max number of frames (start from 0)
MAX_CLIENTS = 8  # max display server clients
DEF_NFRAMES = 1  # save memory; only one frame
DEF_FRAME_WIDTH = 512  # 512 square frame
DEF_FRAME_HEIGHT = 512  # 512 square frame
SZ_LABEL = 256  # main frame label string
SZ_IMTITLE = 128  # image title string
SZ_WCSBUF = 1024  # WCS text buffer size
SZ_OLD_WCSBUF = 320  # old WCS text buffer size
SZ_FIFOBUF = 4000  # transfer size for FIFO i/o
SZ_FNAME = 256  # max file name length
SZ_LINE = 256  # max line length
SZ_IMCURVAL = 160  # size of a padded cursor-value record
# WCS definitions.
W_UNITARY = 0
W_LINEAR = 1
W_LOG = 2
W_DEFFORMAT = " %7.2f %7.2f %7.1f%c"  # default cursor-value WCS format
VERBOSE = 1
class socketTimeout(Exception):
    """Raised when a bounded socket wait elapses; this is normal control
    flow -- the server main loop catches it and re-checks for shutdown."""
    pass
class IIS_DataListener(object):
    """
    A class that listens to a socket/fifo for incoming data.
    It uses the XImtool protocol (libiio.a).
    """
    def __init__(self, addr, name='DataListener',
                 controller=None, ev_quit=None, logger=None):
        """
        addr       -- address object with .prot ('inet' or 'unix') plus
                      .port (inet) or .path (unix)
        controller -- interface controller whose display methods the
                      request handler invokes
        ev_quit    -- threading.Event used to stop the listener; one is
                      created when None
        logger     -- logger instance; a default named logger is created
                      when None
        """
        self.timeout = 0.5
        self.addr = addr
        self.nconnections = 5
        if (addr.prot == 'inet'):
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # bind to all interfaces, not only addr.host
            #s_address = (addr.host, addr.port)
            s_address = ('', addr.port)
        else:
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s_address = addr.path
        # attach a RequestHandler to the server
        self.RequestHandlerClass = IIS_RequestHandler
        # attach the interface controller class we will call this
        # class' display_data() method.
        self.controller = controller
        # Controlled stop of server
        if ev_quit is None:
            ev_quit = threading.Event()
        self.ev_quit = ev_quit
        if logger is None:
            logger = logging.getLogger(name)
        self.logger = logger
        # allow reuse of the socket
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.settimeout(self.timeout)
        # bind the socket and start listening
        self.socket.bind(s_address)
        self.socket.listen(self.nconnections)
    # so we don't have to block indefinitely on the socket.accept() method.
    # See SocketServer.py
    def get_request(self):
        """Wait (up to self.timeout) for a connection and accept it.

        Raises socketTimeout when no connection arrives in time, so the
        caller can poll ev_quit between waits.
        """
        while not self.ev_quit.isSet():
            #self.logger.debug("Ready to accept request, socket %s" % (
            #    str(self.socket)))
            inputs = [ self.socket ]
            try:
                (sin, sout, sexp) = select.select(inputs, [], [], self.timeout)
            except KeyboardInterrupt as e:
                raise e
            except select.error as e:
                self.logger.error("select.error: %s" % str(e))
                # BUGFIX: on Python 3 select.error is an alias of OSError
                # and cannot be unpacked as a (code, msg) tuple; read the
                # errno from e.args instead.
                code = e.args[0] if e.args else None
                # code==4 is interrupted system call. This typically happens
                # when the process receives a signal.
                if code == 4:
                    raise socketTimeout('select() timed out, system call interrupted')
                raise e
            for i in sin:
                if i == self.socket:
                    conn = self.socket.accept()
                    # weird hack due to Solaris 10 handling of sockets
                    conn[0].setblocking(1)
                    return conn
            # Normal timeout, nothing to do.
            raise socketTimeout('select() timed out')
    def handle_request(self):
        """
        Handles incoming connections, one at the time.
        """
        try:
            (request, client_address) = self.get_request()
        except socket.error as e:
            # Error handling goes here.
            self.logger.error("error opening the connection: %s" % (
                str(e)))
            for exctn in sys.exc_info():
                print (exctn)
            return
        try:
            # The handler constructor processes the request synchronously.
            self.RequestHandlerClass(request, client_address, self)
        except Exception as e:
            # Error handling goes here.
            self.logger.error('error handling the request: %s' % (
                str(e)))
            for exctn in sys.exc_info():
                print (exctn)
            return
    def mainloop(self):
        """main control loop: serve requests until ev_quit is set."""
        try:
            while (not self.ev_quit.isSet()):
                try:
                    self.handle_request()
                except socketTimeout:
                    # Normal idle timeout; loop and re-check ev_quit.
                    continue
        finally:
            self.socket.close()
    def stop(self):
        """Request a controlled shutdown and clean up a unix-domain pipe."""
        self.ev_quit.set()
        self.logger.info("stop() invoked on IIS DataListener.")
        if (self.addr.prot == 'unix'):
            try:
                os.remove(self.addr.path)
            except Exception as e:
                self.logger.error("Failed to cleanup the pipe " + self.addr.path +
                                  (": %s" % (str(e))))
class IIS_RequestHandler(SocketServer.StreamRequestHandler):
"""
This class does the actual work of parsing the incoming streams and
perform the necessary actions (display image, overlay regions
and so on).
IIS Header Packet Summary
TID Subunit Tct X Y Z T Data
+------------------+-------------+-----+---+---+----+---+--------+
Read Data | IIS_READ|PACKED | MEMORY | -NB | x | y | fr | - | nbytes |
Write Data | IIS_WRITE|PACKED | MEMORY | -NB | x | y | fr | - | nbytes |
Read Cursor | IIS_READ | IMCURSOR | - | - | - | wcs| - | - |
Write Cursor | IIS_WRITE | IMCURSOR | - | x | y | wcs| - | - |
Set Frame | IIS_WRITE | LUT|COMMAND | -1 | - | - | - | - | 2 |
Erase Frame | IIS_WRITE | fb | FEEDBACK | - | - | - | fr | - | - |
| | | | | | | | |
Old Read WCS | IIS_READ | WCS | - | - | - | fr | - | 320 |
Old Write WCS | IIS_WRITE|PACKED | WCS | -N | - | - | fr |fb | 320 |
| | | | | | | | |
WCS Version? | IIS_READ | WCS | - | 1 | 1 | - | - | 320 |
WCS by Num.? | IIS_READ | WCS | - | 1 | - | fr |wcs| 1024 |
New Read WCS | IIS_READ | WCS | - | 1 | - | fr | - | 1024 |
New Write WCS | IIS_WRITE|PACKED | WCS | -N | 1 | - | fr |fb | 1024 |
+------------------+-------------+-----+---+---+----+---+--------+
Where nbytes | NB = number of bytes expected or written
x = x position of operation in frame buffer coords
y = y position of operation in frame buffer coords
fr = frame number (passed as bitflag (i.e. 1, 2 ,4 8, etc)
fb = frame buffer config number (zero indexed)
N = length of WCS string
wcs = WCS number (usually zero)
Data = the number of bytes of data to be read or written
following the header packet.
"""
needs_update = False
# these NEED to be set automatically
# from the client interaction
width = None
height = None
frame = 0
x = 0
y = 0
y1 = -1
#sequence = -1
#key = None
#got_key = None
def decode_frameno(self, z):
try:
z = int(z)
except:
z = 1
if (not z):
z = 1
n = 0
while (not (z & 1)):
n += 1
z >>= 1
frame = max (1, n + 1)
return frame
def wcs_update(self, wcs_text, fb=None):
"""
parses the wcs_text and populates the fields
of a coord_tran instance.
we start from the coord_tran of the input
frame buffer, if any
"""
if (fb):
ct = fb.ct
else:
ct = coord_tran ()
if (not ct.valid):
ct.zt = W_UNITARY
# read wcs_text
data = string.split(wcs_text, '\n')
ct.imtitle = data[0]
# we are expecting 8 floats and 1 int
try:
(ct.a, ct.b, ct.c, ct.d,
ct.tx, ct.ty, ct.z1, ct.z2,
ct.zt) = string.split(data[1])
ct.a = float(ct.a)
ct.b = float(ct.b)
ct.c = float(ct.c)
ct.d = float(ct.d)
ct.tx = float(ct.tx)
ct.ty = float(ct.ty)
ct.z1 = float(ct.z1)
ct.z2 = float(ct.z2)
ct.zt = int(ct.zt)
except:
ct.imtitle = "[NO WCS]"
ct.a = 1
ct.d = 1
ct.b = 0
ct.c = 0
ct.tx = 0
ct.ty = 0
ct.zt = W_UNITARY
ct.valid += 1
# determine the best format for WCS output
if (ct.valid and ct.zt == W_LINEAR):
z1 = ct.z1
z2 = ct.z2
zrange = abs(z1 - z2)
zavg = (abs(z1) + abs(z2)) / 2.0
if (zrange < 100.0 and zavg < 200.0):
ct.format = " %7.2f %7.2f %7.3f%c"
elif (zrange > 99999.0 or zavg > 99999.0):
ct.format = " %7.2f %7.2f %7.3g%c"
else:
ct.format = W_DEFFORMAT
else:
ct.format = " %7.2f %7.2f %7.0f%c"
# add_mapping, if we can
if (len(data) < 4):
return(ct)
# we are expecting 1 string, 2 floats, and 6 int
try:
print("updating WCS: %s" % str(data[2]))
(ct.region, ct.sx, ct.sy, ct.snx,
ct.sny, ct.dx, ct.dy, ct.dnx,
ct.dny) = string.split(data[2])
ct.sx = float(ct.sx)
ct.sy = float(ct.sy)
ct.snx = int(ct.snx)
ct.sny = int(ct.sny)
# dx, dy: offset into frame where actual data starts
ct.dx = int(ct.dx)
ct.dy = int(ct.dy)
# dnx, dny: length of actual data in frame from offsets
ct.dnx = int(ct.dnx)
ct.dny = int(ct.dny)
ct.ref = string.strip(data[3])
# if this works, we also have the real size of the image
fb.img_width = ct.dnx + 1 # for some reason, the width is always
# 1 pixel smaller...
fb.img_height = ct.dny
except:
ct.region = 'none'
ct.sx = 1.0
ct.sy = 1.0
ct.snx = fb.width
ct.sny = fb.height
ct.dx = 1
ct.dy = 1
ct.dnx = fb.width
ct.dny = fb.height
ct.ref = 'none'
return (ct)
def return_cursor(self, dataout, sx, sy, frame, wcs, key, strval=''):
"""
writes the cursor position to dataout.
input:
dataout: the output stream
sx: x coordinate
sy: y coordinate
wcs: nonzero if we want WCS translation
frame: frame buffer index
key: keystroke used as trigger
strval: optional string value
"""
#print "RETURN CURSOR"
wcscode = (frame + 1) * 100 + wcs
if (key == '\32'):
curval = "EOF"
else:
if (key in string.printable and not key in string.whitespace):
keystr = key
else:
keystr = "\\%03o" % (ord(key))
# send the necessary infor to the client
curval = "%10.3f %10.3f %d %s %s\n" % (sx, sy, wcscode, keystr, strval)
dataout.write(right_pad(curval, SZ_IMCURVAL))
#print "END RETURN CURSOR"
def handle_feedback(self, pkt):
"""This part of the protocol is used by IRAF to erase a frame in
the framebuffers.
"""
self.logger.debug("handle feedback")
self.frame = self.decode_frameno(pkt.z & 0o7777) - 1
# erase the frame buffer
fb = self.server.controller.init_frame(self.frame)
self.server.controller.set_frame(self.frame)
def handle_lut(self, pkt):
"""This part of the protocol is used by IRAF to set the frame number.
"""
self.logger.debug("handle lut")
if pkt.subunit & COMMAND:
data_type = str(pkt.nbytes / 2) + 'h'
size = struct.calcsize(data_type)
line = pkt.datain.read(pkt.nbytes)
n = len(line)
if (n < pkt.nbytes):
return
try:
x = struct.unpack(data_type, line)
except Exception as e:
self.logger.error("Error unpacking struct: %s" % (str(e)))
return
if len(x) < 14:
# pad it with zeroes
y = []
for i in range(14):
try:
y.append(x[i])
except:
y.append(0)
x = y
del(y)
if len(x) == 14:
z = int(x[0])
# frames start from 1, we start from 0
self.frame = self.decode_frameno(z) - 1
if (self.frame > MAX_FRAMES):
self.logger.error("attempt to select non existing frame.")
return
# init the framebuffer
#self.server.controller.init_frame(self.frame)
try:
fb = self.server.controller.get_frame(self.frame)
except KeyError:
fb = self.server.controller.init_frame(self.frame)
return
self.logger.error("unable to select a frame.")
return
self.logger.error("what shall I do?")
def handle_wcs(self, pkt):
    """
    This part of the protocol is used by IRAF to bidirectionally
    communicate metadata about frames in the framebuffers.

    IIS WCS format:
        name - title\n
        a b c d tx ty z1 z2 zt\n
        region_name sx sy snx sny dx dy dnx dny\n
        object_ref

    where the new parameters are defined as

        region_name - user-defined name for the region (e.g. 'image',
            'subras1', 'ccd3', etc).
        sx, sy, snx, sny - source rect in the object
        dx, dy, dnx, dny - dest rect in the display frame buffer
        object_ref - full node!/path/image[sec] image name, same as
            was immap'd when the image was displayed.  Used
            for access after the display

    Input:
        pkt: the decoded iis packet; pkt.tid selects read vs. write,
             pkt.x/pkt.y/pkt.z/pkt.t carry bit-encoded options.
    """
    self.logger.debug("handle wcs")
    if pkt.tid & IIS_READ:
        self.logger.debug("iis read")
        # Return the WCS for the referenced frame.
        if (pkt.x & 0o17777) and (pkt.y & 0o17777):
            # both flag fields set: the client is asking for the
            # IIS protocol version number, not a frame's WCS
            text = "version=" + str(IIS_VERSION)
            text = right_pad(text, SZ_OLD_WCSBUF)
        else:
            # frames start from 1, we start from 0
            frame = self.decode_frameno(pkt.z & 0o177777) - 1
            try:
                fb = self.server.controller.get_frame(frame)
            except KeyError:
                # frame does not exist (yet); report below accordingly
                fb = None
            self.logger.debug("frame=%d fb=%s" % (frame, fb))
            # NOTE(review): this branch tests pkt.t where the version
            # check above tests pkt.y -- confirm which field is intended
            if (pkt.x & 0o17777) and (pkt.t & 0o17777):
                # new-style query: return the parsed coordinate
                # transform plus the region mapping
                self.frame = frame
                if (fb and fb.ct.a is not None):
                    wcs = "%s\n%f %f %f %f %f %f %f %f %d\n" % (
                        fb.ct.imtitle, fb.ct.a, fb.ct.b, fb.ct.c, fb.ct.d,
                        fb.ct.tx, fb.ct.ty, fb.ct.z1, fb.ct.z2, fb.ct.zt)
                else:
                    wcs = "[NOSUCHWCS]\n"
                if (fb and fb.ct.sx is not None):
                    mapping = "%s %f %f %d %d %d %d %d %d\n%s\n" % (
                        fb.ct.region, fb.ct.sx, fb.ct.sy, fb.ct.snx, fb.ct.sny,
                        fb.ct.dx, fb.ct.dy, fb.ct.dnx, fb.ct.dny, fb.ct.ref)
                else:
                    mapping = ""
                text = wcs + mapping
                text = right_pad(text, SZ_WCSBUF)
            else:
                # old-style query: return the raw WCS text as received
                if (frame < 0) or (fb is None) or (fb.buffer is None) or \
                       (len(fb.buffer) == 0):
                    text = "[NOSUCHFRAME]"
                else:
                    text = fb.wcs
                # old style or new style buffer size?
                if pkt.x & 0o777:
                    text = right_pad(text, SZ_WCSBUF)
                else:
                    text = right_pad(text, SZ_OLD_WCSBUF)
        self.logger.debug("WCS: " + text)
        pkt.dataout.write(text)
    else:
        self.logger.debug("iis write")
        # Read the WCS information from the client
        # frames start from 1, we start from 0
        self.frame = self.decode_frameno(pkt.z & 0o7777) - 1
        try:
            fb = self.server.controller.get_frame(self.frame)
        except KeyError:
            # the selected frame does not exist, create it
            fb = self.server.controller.init_frame(self.frame)
        # set the width and height of the framebuffer from the
        # config index encoded in pkt.t
        fb_config = (pkt.t & 0o777) + 1
        try:
            (nframes, fb.width, fb.height) = fbconfigs [fb_config]
        except KeyError:
            # unknown config: register a placeholder entry with
            # unknown geometry (filled in later from the data size)
            self.logger.warn('Non existing framebuffer config (%s)' % (
                str(fb_config)))
            self.logger.info('Adding a new framebuffer config (%s)' % (
                str(fb_config)))
            fbconfigs[fb_config] = [1, None, None]
            fb.width = None
            fb.height = None
        # do we have to deal with the new WCS format? (not used, for now)
        new_wcs = (pkt.x & 0o777)
        # read the WCS info
        line = pkt.datain.read(pkt.nbytes)
        # paste it in the frame buffer
        fb.wcs = line
        # reset the coordinate transform, then re-parse it from the
        # freshly received WCS text
        fb.ct.format = W_DEFFORMAT
        fb.ct.imtitle = ''
        fb.ct.valid = 0
        fb.ct = self.wcs_update(line, fb)
    # end of handle_wcs()
def handle_memory(self, pkt):
    """This part of the protocol is used by IRAF to read/write image data
    in the framebuffers.

    Input:
        pkt: the decoded iis packet; pkt.x/pkt.y carry the pixel offset,
             pkt.z the frame number and pkt.nbytes the payload size.
    """
    self.logger.debug("handle memory")
    # get the frame number; IRAF counts from 1, we count from 0
    self.frame = self.decode_frameno(pkt.z & 0o7777) - 1
    try:
        fb = self.server.controller.get_frame(self.frame)
    except KeyError:
        # the selected frame does not exist, create it
        fb = self.server.controller.init_frame(self.frame)
    self.x = pkt.x & XYMASK
    self.y = pkt.y & XYMASK
    self.logger.debug("memory frame=%d x,y=%d,%d fb width=%s height=%s" % (
        self.frame, self.x, self.y, fb.width, fb.height))
    if (pkt.tid & IIS_READ):
        self.logger.debug("start memory read")
        # read the requested region and send it back to the client
        start = self.x + self.y * fb.width
        end = start + pkt.nbytes
        data = fb.buffer[start:end]
        if len(data) != pkt.nbytes:
            self.logger.warn("buffer length/packet size mismatch: %d != %d" % (
                len(data), pkt.nbytes))
        # BUGFIX: array.tostring() was removed in Python 3.9;
        # tobytes() is the direct replacement
        buf = data.tobytes()
        pkt.dataout.write(buf)
        pkt.dataout.flush()
        self.logger.debug("end memory read")
    else:
        self.logger.debug("start memory write")
        self.logger.debug("data bytes=%d needs_update=%s" % (
            pkt.nbytes, self.needs_update))
        if (fb.width is not None) and (fb.height is not None):
            # geometry is known: write the chunk in place
            if not self.needs_update:
                # (re)allocate the buffer if its size does not match
                # the framebuffer geometry.
                # BUGFIX: '\000' * n is a str and rejected by
                # array.array on Python 3; bytes(n) gives n zero bytes
                if len(fb.buffer) != fb.width * fb.height:
                    fb.buffer = array.array('B', bytes(fb.width * fb.height))
            start = self.x + self.y * fb.width
            end = start + pkt.nbytes
            fb.buffer[start:end] = array.array('B', pkt.datain.read(pkt.nbytes))
        else:
            # geometry unknown: accumulate chunks; height is derived
            # later from the total buffer size (see display_image)
            self.logger.warn("uninitialized framebuffer frame=%d" % (
                self.frame))
            if not self.needs_update:
                # first chunk: initialize the framebuffer.
                # BUGFIX: array.fromstring() was removed in Python 3.9;
                # frombytes() is the direct replacement
                fb.buffer.frombytes(pkt.datain.read(pkt.nbytes))
                fb.buffer.reverse()
            else:
                data = array.array('B', pkt.datain.read(pkt.nbytes))
                data.reverse()
                fb.buffer += data
        self.needs_update = True
        self.logger.debug("end memory write")
def handle_imcursor(self, pkt):
    """This part of the protocol is used by IRAF to read the cursor
    position and keystrokes from the display client, or to set the
    cursor position.
    """
    self.logger.debug("handle imcursor")
    if pkt.tid & IIS_READ:
        if pkt.tid & IMC_SAMPLE:
            self.logger.debug("SAMPLE")
            # return the current cursor position
            wcsflag = int(pkt.z)
            res = self.server.controller.get_keystroke()
            self.return_cursor(pkt.dataout, res.x, res.y,
                               res.frame, wcsflag, '0', '')
        else:
            self.logger.debug("OTHER")
            # wait for a keystroke, remember where it happened and
            # report position + key back to the client
            res = self.server.controller.get_keystroke()
            self.logger.debug("FRAME=%d X,Y=%f,%f" % (
                res.frame, res.x, res.y))
            self.x = res.x
            self.y = res.y
            self.frame = res.frame
            wcsflag = 0
            self.return_cursor(pkt.dataout, res.x, res.y,
                               res.frame, wcsflag, res.key, '')
    else:
        self.logger.debug("READ")
        # the client is setting the cursor position, given in
        # logical coordinates
        sx = int(pkt.x)
        sy = int(pkt.y)
        wx = float(pkt.x)
        wy = float(pkt.y)
        wcs = int(pkt.z)
        if wcs:
            # decode the WCS info for the current frame and map the
            # world coordinates back to screen coordinates
            try:
                fb = self.server.controller.get_frame(self.frame)
            except KeyError:
                # the selected frame does not exist, create it
                fb = self.server.controller.init_frame(self.frame)
            # NOTE(review): handle_wcs calls wcs_update(line, fb);
            # confirm the single-argument form used here is intended
            fb.ct = self.wcs_update(fb.wcs)
            if fb.ct.valid:
                if abs(fb.ct.a) > 0.001:
                    sx = int((wx - fb.ct.tx) / fb.ct.a)
                if abs(fb.ct.d) > 0.001:
                    # BUGFIX: was 'xt.ty' -- an undefined name that
                    # raised NameError; the y translation lives in
                    # fb.ct.ty (mirroring the x branch above)
                    sy = int((wy - fb.ct.ty) / fb.ct.d)
        self.server.controller.set_cursor(sx, sy)
def handle(self):
    """
    This is where the action starts: read iis packet headers from the
    client and dispatch each one to the handle_* method matching its
    subunit field, until the stream is exhausted.
    """
    self.logger = self.server.logger
    # create a packet structure
    packet = iis()
    packet.datain = self.rfile
    packet.dataout = self.wfile
    # decode the header
    size = struct.calcsize('8h')
    line = packet.datain.read(size)
    n = len(line)
    if n < size:
        return
    while n > 0:
        try:
            # NOTE: renamed from 'bytes' to avoid shadowing the builtin
            fields = struct.unpack('8h', line)
        except struct.error:
            self.logger.error('error unpacking the data.')
            for exctn in sys.exc_info():
                print (exctn)
            # BUGFIX: the header is unusable -- bail out instead of
            # falling through and dispatching on unbound/stale fields
            return
        # TODO: verify checksum
        # decode the packet fields
        subunit = fields[2]
        subunit077 = subunit & 0o77
        tid = fields[0]
        x = fields[4] & 0o177777
        y = fields[5] & 0o177777
        z = fields[6] & 0o177777
        t = fields[7] & 0o17777
        ndatabytes = - fields[1]
        # are the bytes packed?
        if (not(tid & PACKED)):
            ndatabytes *= 2
        # populate the packet structure
        packet.subunit = subunit
        packet.subunit077 = subunit077
        packet.tid = tid
        packet.x = x
        packet.y = y
        packet.z = z
        packet.t = t
        packet.nbytes = ndatabytes
        # decide what to do, depending on the
        # value of subunit
        self.logger.debug("PACKET IS %o" % packet.subunit)
        if packet.subunit077 == FEEDBACK:
            self.handle_feedback(packet)
        elif packet.subunit077 == LUT:
            self.handle_lut(packet)
            # read the next packet
            line = packet.datain.read(size)
            n = len(line)
            continue
        elif packet.subunit077 == MEMORY:
            self.handle_memory(packet)
            if self.needs_update:
                #self.display_image()
                pass
            # read the next packet
            line = packet.datain.read(size)
            n = len(line)
            continue
        elif packet.subunit077 == WCS:
            self.handle_wcs(packet)
            line = packet.datain.read(size)
            n = len(line)
            continue
        elif packet.subunit077 == IMCURSOR:
            self.handle_imcursor(packet)
            line = packet.datain.read(size)
            n = len(line)
            continue
        else:
            self.logger.debug('?NO OP (0%o)' % (packet.subunit077))
            if not (packet.tid & IIS_READ):
                # OK, discard the rest of the data
                nbytes = packet.nbytes
                while nbytes > 0:
                    if nbytes < SZ_FIFOBUF:
                        n = nbytes
                    else:
                        n = SZ_FIFOBUF
                    m = self.rfile.read(n)
                    # BUGFIX: read() returns bytes; 'm <= 0' compared
                    # bytes to an int (TypeError on Python 3).  An
                    # empty read means EOF.
                    if not m:
                        break
                    nbytes -= n
        # read the next packet
        line = packet.datain.read(size)
        n = len(line)
        if n < size:
            return
    # <--- end of the while (n) loop
    if self.needs_update:
        self.display_image()
        self.needs_update = False
def display_image(self, reset=1):
    """Utility routine used to display an updated frame from a framebuffer.

    Input:
        reset: unused in this body; presumably kept for interface
               compatibility -- TODO confirm against callers.
    """
    try:
        fb = self.server.controller.get_frame(self.frame)
    except KeyError:
        # the selected frame does not exist, create it
        fb = self.server.controller.init_frame(self.frame)
    if not fb.height:
        # height not known yet: derive it from the accumulated buffer
        # size and the known width (assumes full scanlines were sent;
        # width must be set here or this raises on None/0)
        width = fb.width
        height = int(len(fb.buffer) / width)
        fb.height = height
        # display the image
        if (len(fb.buffer) > 0) and (height > 0):
            self.server.controller.display(self.frame, width, height,
                                           True)
    else:
        # geometry already known: hand the frame to the controller
        self.server.controller.display(self.frame, fb.width, fb.height,
                                       False)
def decode_iis(self, data):
    """Debugging helper: dump the raw iis payload to a scratch file.

    BUGFIX: the original used the Python 2 only 'file' builtin and
    returned an undefined name ('decoded_data'), so it always raised.
    It now dumps the payload and returns it unchanged.

    Input:
        data: raw bytes of the iis payload.
    Returns:
        the payload, unchanged.
    """
    with open('/tmp/pippo', 'wb') as f:
        f.write(data)
    return data
# Frame buffer configurations (imtoolrc-style table).
# Each entry maps a config number (as sent in the WCS packet's t field,
# see handle_wcs) to [nframes, width, height].  A width/height of None
# means the geometry is unknown and must be derived from the data size.
fbconfigs = {
    1: [2, 512, 512],
    2: [2, 800, 800],
    3: [2, 1024, 1024],
    4: [1, 1600, 1600],
    5: [1, 2048, 2048],
    6: [1, 4096, 4096],
    7: [1, 8192, 8192],
    8: [1, 1024, 4096],
    9: [2, 1144, 880],
    10: [2, 1144, 764],
    11: [2, 128, 128],
    12: [2, 256, 256],
    13: [2, 128, 1056],
    14: [2, 256, 1056],
    15: [2, 1056, 128],
    16: [2, 1056, 256],
    17: [2, 1008, 648],
    18: [2, 1024, 680],
    19: [1, 4096, 1024],
    20: [2, 388, 576],
    21: [1, 3040, 976],
    22: [1, 128, 1520],
    23: [1, 256, 1520],
    24: [1, 512, 1520],
    25: [1, 960, 1520],
    26: [1, 1232, 800],
    27: [1, 3104, 512],
    28: [1, 976, 3040],
    29: [1, 800, 256],
    30: [1, 256, 800],
    31: [1, 1240, 400],
    32: [2, 832, 800],
    33: [2, 544, 512],
    34: [1, 1056, 1024],
    35: [1, 2080, 2048],
    36: [2, 832, 820],
    37: [2, 520, 512],
    38: [1, 3104, 1024],
    39: [1, 1232, 800],
    40: [4, 1200, 600],
    41: [1, 8800, 8800],
    42: [1, 4400, 4400],
    43: [1, 2200, 2200],
    44: [1, 1100, 1100],
    45: [1, 2080, 4644],
    46: [1, 6400, 4644],
    47: [1, 3200, 2322],
    48: [1, 1600, 1161],
    49: [1, 800, 581],
    50: [1, 2048, 2500]}
class iis(object):
    """A decoded iis protocol packet.

    Plain record holding the header fields of one imtool/iis packet
    plus the streams the payload is read from / replies are written to.
    All fields start out as None and are filled in by handle().
    """

    _FIELDS = ('tid', 'subunit', 'subunit077', 'nbytes',
               'x', 'y', 'z', 't', 'datain', 'dataout')

    def __init__(self):
        for field_name in self._FIELDS:
            setattr(self, field_name, None)
class coord_tran(object):
    """WCS coordinate transformation state for one framebuffer.

    Holds the screen -> physical linear transform, the greyscale
    mapping and the source/destination region mapping parsed from the
    iis WCS text.
    """

    def __init__(self):
        # has the WCS been validated/parsed?
        self.valid = 0
        # screen -> physical: scale (a, d), cross terms (b, c) and
        # translation (tx, ty)
        self.a, self.b, self.c, self.d = 1, 0, 0, 1
        self.tx, self.ty = 0, 0
        # greyscale range and mapping type
        self.z1, self.z2, self.zt = 0, 1, W_UNITARY
        # WCS output format and image title parsed from the WCS text
        self.format = ''
        self.imtitle = ''
        # physical -> celestial identifiers
        self.regid = None
        self.id = None
        # src/dst region mapping
        self.ref = ''
        self.region = ''
        self.sx, self.sy = 1.0, 1.0
        self.snx, self.sny = DEF_FRAME_WIDTH, DEF_FRAME_WIDTH
        self.dx, self.dy = 1, 1
        self.dnx, self.dny = DEF_FRAME_WIDTH, DEF_FRAME_WIDTH
class framebuffer(object):
    """State for a single display framebuffer."""

    def __init__(self):
        # geometry: None until a framebuffer config or WCS defines it
        self.width = None
        self.height = None
        self.img_width = None
        self.img_height = None
        # framebuffer config index (see fbconfigs dictionary)
        self.config = None
        # raw WCS text as received from the client
        self.wcs = None
        # the image data itself and its bitmap
        self.image = None
        self.bitmap = None
        # raw pixel bytes used for screen updates
        self.buffer = None
        # zoom level
        self.zoom = 1.0
        # screen -> physical coordinate transform
        self.ct = coord_tran()
        self.chname = None
# utility routines
def wcs_pix_transform(ct, i, format=0):
    """Computes the WCS corrected pixel value given a coordinate
    transformation and the raw pixel value.

    Input:
        ct      coordinate transformation. instance of coord_tran.
        i       raw pixel intensity.
        format  format flag (optional); 'T'/'t' are treated as 1.
    Returns:
        WCS corrected pixel value (float)
    """
    z1 = float(ct.z1)
    z2 = float(ct.z2)
    i = float(i)
    if format == 'T' or format == 't':
        format = 1
    if i == 0:
        t = 0.
    elif ct.zt == W_LINEAR:
        # map the display value back into the [z1, z2] range
        t = ((i - 1) * (z2 - z1) / 199.0) + z1
        t = max(z1, min(z2, t))
    else:
        t = float(i)
    # NOTE(review): a 'T'/'t' format becomes 1, which does NOT satisfy
    # 'format > 1' -- confirm whether '>= 1' was intended.
    if format > 1:
        # BUGFIX: compute the y scaling lazily, so a degenerate
        # greyscale range (z1 == z2) no longer raises ZeroDivisionError
        # on code paths that never use the scale factor
        yscale = 128.0 / (z2 - z1)
        t = (z2 - t) * yscale
    return t
def wcs_coord_transform(ct, x, y):
    """Computes the WCS corrected pixel coordinates (RA and Dec
    in degrees) given a coordinate transformation and the screen
    coordinates (x and y, in pixels).

    Input:
        ct  coordinate transformation. instance of coord_tran.
        x   x coordinate in pixels.
        y   y coordinate in pixels.
    Returns:
        (RA, Dec) in degrees (as floats).
    """
    x = float(x)
    y = float(y)
    # BUGFIX: default to the untransformed coordinates up front.  The
    # original only assigned ra/dec inside the abs() guards, so a valid
    # transform with a near-zero scale factor raised UnboundLocalError.
    ra = x
    dec = y
    if ct.valid:
        # The imtool WCS assumes that the center of the first display
        # pixel is at (0,0) but actually it is at (0.5,0.5).
        #x -= 0.5
        #y -= 0.5
        if abs(ct.a) > .001:
            ra = ct.a * x + ct.c * y + ct.tx
        if abs(ct.d) > .001:
            dec = ct.b * x + ct.d * y + ct.ty
    return (ra, dec)
def sex2deg(sex, sep=':'):
    """Convert a sexagesimal string 'dd:mm:ss' (or 'dd:mm') to decimal
    degrees.

    BUGFIX: the original called string.split()/string.strip(), which
    were removed from the 'string' module in Python 3; use the
    equivalent str methods instead.

    Input:
        sex  sexagesimal string, e.g. '-10:30:00'.
        sep  field separator (default ':').
    Returns:
        decimal degrees as a float.
    """
    parts = sex.strip().split(sep)
    if len(parts) == 3:
        (dd, mm, ss) = parts
    else:
        # two fields only: assume zero seconds
        (dd, mm) = parts
        ss = '0'
    if float(dd) >= 0:
        return float(dd) + float(mm) / 60.0 + float(ss) / 3600.0
    else:
        # negative degrees: minutes/seconds increase the magnitude
        return float(dd) - float(mm) / 60.0 - float(ss) / 3600.0
def deg2sex(deg, sep=':'):
    """Convert decimal degrees to a sexagesimal string 'dd:mm:ss.ss'.

    Input:
        deg  decimal degrees (anything convertible to float).
        sep  field separator (default ':').
    Returns:
        the sexagesimal string, or '' if *deg* cannot be converted.
    """
    try:
        deg = float(deg)
    except (TypeError, ValueError):
        # narrow replacement for the original bare except
        return ''
    degrees = int(deg)
    # fractional part expressed in minutes; flip the sign for negative
    # input so minutes/seconds come out non-negative (the two original
    # branches differed only in this sign flip)
    temp = (deg - degrees) * 60
    if degrees < 0:
        temp = -temp
    minutes = int(temp)
    seconds = int((temp - minutes) * 60)
    return "%02d%c%02d%c%05.2f" % (degrees, sep, minutes, sep, seconds)
def right_pad(strg, length, ch=' '):
    """Return *strg* padded on the right with *ch* up to *length* chars.

    If *strg* is already *length* characters or longer it is returned
    unchanged (no truncation).
    As seen on http://www.halfcooked.com/mt/archives/000640.html
    """
    pad_count = length - len(strg)
    return strg + ch * pad_count
def get_interface(addr=None):
    """Work out the imtool transport from *addr* or the IMTDEV
    environment variable.

    Recognized IMTDEV formats: 'inet:<port>:<host>', 'inet:<port>',
    'unix:<path>' and a bare '<port>'.  When IMTDEV is unset, fall
    back to the per-user unix socket /tmp/.IMT<uid>.

    Returns:
        a Bunch with 'prot' and 'name' plus host/port (inet) or
        path (unix).
    Raises:
        socketError for an unrecognized IMTDEV format.
    """
    if addr:
        imtdev = addr
    else:
        if 'IMTDEV' not in os.environ:
            # no explicit device: default to the unix socket convention
            uid = os.getuid()
            path = '/tmp/.IMT' + str(uid)
            prot = 'unix'
            name = "%s:%s" % (prot, path)
            return Bunch.Bunch(prot=prot, path=path, name=name)
        imtdev = os.environ['IMTDEV']

    # try each of the recognized formats in turn; first match wins
    m = re.match(r'^(inet)\:(\d+)\:([\w\._\-]+)$', imtdev)
    if m:
        prot, port, host = m.groups()
        return Bunch.Bunch(prot=prot, port=int(port), host=host,
                           name=imtdev)

    m = re.match(r'^(inet)\:(\d+)$', imtdev)
    if m:
        prot, port = m.groups()
        return Bunch.Bunch(prot=prot, port=int(port), host='',
                           name=imtdev)

    m = re.match(r'^(unix)\:(.+)$', imtdev)
    if m:
        prot, path = m.groups()
        return Bunch.Bunch(prot=prot, path=path, name=imtdev)

    m = re.match(r'^(\d+)$', imtdev)
    if m:
        # a bare port number implies an inet socket
        return Bunch.Bunch(prot='inet', port=int(m.group(1)), host='',
                           name=imtdev)

    raise socketError("I don't understand the format of addr IMTDEV: '%s'" % (imtdev))
#END
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AnalysisInput
from ._models_py3 import AnalyzeBatchInput
from ._models_py3 import AnalyzeJobDisplayName
from ._models_py3 import AnalyzeJobErrorsAndStatistics
from ._models_py3 import AnalyzeJobMetadata
from ._models_py3 import AnalyzeJobState
from ._models_py3 import DetectedLanguage
from ._models_py3 import DocumentEntities
from ._models_py3 import DocumentError
from ._models_py3 import DocumentHealthcareEntities
from ._models_py3 import DocumentKeyPhrases
from ._models_py3 import DocumentLanguage
from ._models_py3 import DocumentLinkedEntities
from ._models_py3 import DocumentSentiment
from ._models_py3 import DocumentStatistics
from ._models_py3 import EntitiesResult
from ._models_py3 import EntitiesTask
from ._models_py3 import EntitiesTaskParameters
from ._models_py3 import EntitiesTaskResult
from ._models_py3 import Entity
from ._models_py3 import EntityLinkingResult
from ._models_py3 import EntityLinkingTask
from ._models_py3 import EntityLinkingTaskParameters
from ._models_py3 import EntityLinkingTaskResult
from ._models_py3 import ErrorResponse
from ._models_py3 import HealthcareAssertion
from ._models_py3 import HealthcareEntity
from ._models_py3 import HealthcareEntityLink
from ._models_py3 import HealthcareEntityProperties
from ._models_py3 import HealthcareJobState
from ._models_py3 import HealthcareLinkingProperties
from ._models_py3 import HealthcareRelation
from ._models_py3 import HealthcareRelationEntity
from ._models_py3 import HealthcareResult
from ._models_py3 import HealthcareTaskResult
from ._models_py3 import InnerError
from ._models_py3 import JobDescriptor
from ._models_py3 import JobManifest
from ._models_py3 import JobManifestTasks
from ._models_py3 import JobMetadata
from ._models_py3 import KeyPhraseResult
from ._models_py3 import KeyPhraseTaskResult
from ._models_py3 import KeyPhrasesTask
from ._models_py3 import KeyPhrasesTaskParameters
from ._models_py3 import LanguageBatchInput
from ._models_py3 import LanguageInput
from ._models_py3 import LanguageResult
from ._models_py3 import LinkedEntity
from ._models_py3 import Match
from ._models_py3 import MultiLanguageBatchInput
from ._models_py3 import MultiLanguageInput
from ._models_py3 import Pagination
from ._models_py3 import PiiDocumentEntities
from ._models_py3 import PiiResult
from ._models_py3 import PiiTask
from ._models_py3 import PiiTaskParameters
from ._models_py3 import PiiTaskResult
from ._models_py3 import RequestStatistics
from ._models_py3 import SentenceAssessment
from ._models_py3 import SentenceSentiment
from ._models_py3 import SentenceTarget
from ._models_py3 import SentimentAnalysisTask
from ._models_py3 import SentimentAnalysisTaskParameters
from ._models_py3 import SentimentConfidenceScorePerLabel
from ._models_py3 import SentimentResponse
from ._models_py3 import SentimentTaskResult
from ._models_py3 import TargetConfidenceScoreLabel
from ._models_py3 import TargetRelation
from ._models_py3 import TaskState
from ._models_py3 import TasksState
from ._models_py3 import TasksStateTasks
from ._models_py3 import TasksStateTasksEntityLinkingTasksItem
from ._models_py3 import TasksStateTasksEntityRecognitionPiiTasksItem
from ._models_py3 import TasksStateTasksEntityRecognitionTasksItem
from ._models_py3 import TasksStateTasksKeyPhraseExtractionTasksItem
from ._models_py3 import TasksStateTasksSentimentAnalysisTasksItem
from ._models_py3 import TextAnalyticsError
from ._models_py3 import TextAnalyticsWarning
except (SyntaxError, ImportError):
from ._models import AnalysisInput # type: ignore
from ._models import AnalyzeBatchInput # type: ignore
from ._models import AnalyzeJobDisplayName # type: ignore
from ._models import AnalyzeJobErrorsAndStatistics # type: ignore
from ._models import AnalyzeJobMetadata # type: ignore
from ._models import AnalyzeJobState # type: ignore
from ._models import DetectedLanguage # type: ignore
from ._models import DocumentEntities # type: ignore
from ._models import DocumentError # type: ignore
from ._models import DocumentHealthcareEntities # type: ignore
from ._models import DocumentKeyPhrases # type: ignore
from ._models import DocumentLanguage # type: ignore
from ._models import DocumentLinkedEntities # type: ignore
from ._models import DocumentSentiment # type: ignore
from ._models import DocumentStatistics # type: ignore
from ._models import EntitiesResult # type: ignore
from ._models import EntitiesTask # type: ignore
from ._models import EntitiesTaskParameters # type: ignore
from ._models import EntitiesTaskResult # type: ignore
from ._models import Entity # type: ignore
from ._models import EntityLinkingResult # type: ignore
from ._models import EntityLinkingTask # type: ignore
from ._models import EntityLinkingTaskParameters # type: ignore
from ._models import EntityLinkingTaskResult # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import HealthcareAssertion # type: ignore
from ._models import HealthcareEntity # type: ignore
from ._models import HealthcareEntityLink # type: ignore
from ._models import HealthcareEntityProperties # type: ignore
from ._models import HealthcareJobState # type: ignore
from ._models import HealthcareLinkingProperties # type: ignore
from ._models import HealthcareRelation # type: ignore
from ._models import HealthcareRelationEntity # type: ignore
from ._models import HealthcareResult # type: ignore
from ._models import HealthcareTaskResult # type: ignore
from ._models import InnerError # type: ignore
from ._models import JobDescriptor # type: ignore
from ._models import JobManifest # type: ignore
from ._models import JobManifestTasks # type: ignore
from ._models import JobMetadata # type: ignore
from ._models import KeyPhraseResult # type: ignore
from ._models import KeyPhraseTaskResult # type: ignore
from ._models import KeyPhrasesTask # type: ignore
from ._models import KeyPhrasesTaskParameters # type: ignore
from ._models import LanguageBatchInput # type: ignore
from ._models import LanguageInput # type: ignore
from ._models import LanguageResult # type: ignore
from ._models import LinkedEntity # type: ignore
from ._models import Match # type: ignore
from ._models import MultiLanguageBatchInput # type: ignore
from ._models import MultiLanguageInput # type: ignore
from ._models import Pagination # type: ignore
from ._models import PiiDocumentEntities # type: ignore
from ._models import PiiResult # type: ignore
from ._models import PiiTask # type: ignore
from ._models import PiiTaskParameters # type: ignore
from ._models import PiiTaskResult # type: ignore
from ._models import RequestStatistics # type: ignore
from ._models import SentenceAssessment # type: ignore
from ._models import SentenceSentiment # type: ignore
from ._models import SentenceTarget # type: ignore
from ._models import SentimentAnalysisTask # type: ignore
from ._models import SentimentAnalysisTaskParameters # type: ignore
from ._models import SentimentConfidenceScorePerLabel # type: ignore
from ._models import SentimentResponse # type: ignore
from ._models import SentimentTaskResult # type: ignore
from ._models import TargetConfidenceScoreLabel # type: ignore
from ._models import TargetRelation # type: ignore
from ._models import TaskState # type: ignore
from ._models import TasksState # type: ignore
from ._models import TasksStateTasks # type: ignore
from ._models import TasksStateTasksEntityLinkingTasksItem # type: ignore
from ._models import TasksStateTasksEntityRecognitionPiiTasksItem # type: ignore
from ._models import TasksStateTasksEntityRecognitionTasksItem # type: ignore
from ._models import TasksStateTasksKeyPhraseExtractionTasksItem # type: ignore
from ._models import TasksStateTasksSentimentAnalysisTasksItem # type: ignore
from ._models import TextAnalyticsError # type: ignore
from ._models import TextAnalyticsWarning # type: ignore
from ._text_analytics_client_enums import (
Association,
Certainty,
Conditionality,
DocumentSentimentValue,
ErrorCodeValue,
HealthcareEntityCategory,
InnerErrorCodeValue,
PiiCategory,
PiiTaskParametersDomain,
RelationType,
SentenceSentimentValue,
State,
StringIndexType,
TargetRelationType,
TokenSentimentValue,
WarningCodeValue,
)
__all__ = [
'AnalysisInput',
'AnalyzeBatchInput',
'AnalyzeJobDisplayName',
'AnalyzeJobErrorsAndStatistics',
'AnalyzeJobMetadata',
'AnalyzeJobState',
'DetectedLanguage',
'DocumentEntities',
'DocumentError',
'DocumentHealthcareEntities',
'DocumentKeyPhrases',
'DocumentLanguage',
'DocumentLinkedEntities',
'DocumentSentiment',
'DocumentStatistics',
'EntitiesResult',
'EntitiesTask',
'EntitiesTaskParameters',
'EntitiesTaskResult',
'Entity',
'EntityLinkingResult',
'EntityLinkingTask',
'EntityLinkingTaskParameters',
'EntityLinkingTaskResult',
'ErrorResponse',
'HealthcareAssertion',
'HealthcareEntity',
'HealthcareEntityLink',
'HealthcareEntityProperties',
'HealthcareJobState',
'HealthcareLinkingProperties',
'HealthcareRelation',
'HealthcareRelationEntity',
'HealthcareResult',
'HealthcareTaskResult',
'InnerError',
'JobDescriptor',
'JobManifest',
'JobManifestTasks',
'JobMetadata',
'KeyPhraseResult',
'KeyPhraseTaskResult',
'KeyPhrasesTask',
'KeyPhrasesTaskParameters',
'LanguageBatchInput',
'LanguageInput',
'LanguageResult',
'LinkedEntity',
'Match',
'MultiLanguageBatchInput',
'MultiLanguageInput',
'Pagination',
'PiiDocumentEntities',
'PiiResult',
'PiiTask',
'PiiTaskParameters',
'PiiTaskResult',
'RequestStatistics',
'SentenceAssessment',
'SentenceSentiment',
'SentenceTarget',
'SentimentAnalysisTask',
'SentimentAnalysisTaskParameters',
'SentimentConfidenceScorePerLabel',
'SentimentResponse',
'SentimentTaskResult',
'TargetConfidenceScoreLabel',
'TargetRelation',
'TaskState',
'TasksState',
'TasksStateTasks',
'TasksStateTasksEntityLinkingTasksItem',
'TasksStateTasksEntityRecognitionPiiTasksItem',
'TasksStateTasksEntityRecognitionTasksItem',
'TasksStateTasksKeyPhraseExtractionTasksItem',
'TasksStateTasksSentimentAnalysisTasksItem',
'TextAnalyticsError',
'TextAnalyticsWarning',
'Association',
'Certainty',
'Conditionality',
'DocumentSentimentValue',
'ErrorCodeValue',
'HealthcareEntityCategory',
'InnerErrorCodeValue',
'PiiCategory',
'PiiTaskParametersDomain',
'RelationType',
'SentenceSentimentValue',
'State',
'StringIndexType',
'TargetRelationType',
'TokenSentimentValue',
'WarningCodeValue',
]
| |
#!/usr/bin/env python
#
# Compatibility stub which now executes JS-based tooling.
#
# Should be Python2 and Python3 compatible.
import os
import sys
import time
import subprocess
import optparse
import yaml
import tempfile
def detect_nodejs():
try:
cmd = [ 'nodejs', '-e', 'console.log("test")' ]
res = subprocess.check_output(cmd)
if res[:4] == 'test'.encode('utf-8'):
return 'nodejs'
except:
pass
try:
cmd = [ 'node', '-e', 'console.log("test")' ]
res = subprocess.check_output(cmd)
if res[:4] == 'test'.encode('utf-8'):
return 'node'
except:
pass
return None
def main():
    """Command line entry point for the configure.py compatibility stub.

    Parses the legacy configure.py options, translates them into arguments
    for the JS-based tooling (duktool.js), and invokes it via Node.js.
    Forced options (--define/-D, --undefine/-U, --option-yaml,
    --option-file) are merged into one temporary YAML file; fixup lines
    and files are merged into one temporary header snippet file.  Both
    temporary files are removed on exit.

    Raises an Exception for unexpected positional arguments, removed
    options, a missing Node.js interpreter, or a missing duktool.js.
    """
    sys.stderr.write('\n')
    sys.stderr.write('****************************************************************************\n')
    sys.stderr.write('*** Duktape python tooling is obsolete, migrate to JS-based tooling! ***\n')
    sys.stderr.write('*** This tool now internally invokes the JS-based tooling. ***\n')
    sys.stderr.write('*** Minimum Node.js version is 14.x. ***\n')
    sys.stderr.write('****************************************************************************\n')
    sys.stderr.write('\n')
    time.sleep(2)  # Give the user a moment to notice the deprecation banner.

    parser = optparse.OptionParser(
        usage='Usage: %prog [options]',
        description='Compatibility stub for JS-based tooling'
    )

    # Forced options from multiple sources are gathered into a shared list
    # so that the override order remains the same as on the command line.
    force_options_yaml = []
    def add_force_option_yaml(option, opt, value, parser):
        # Inline YAML snippet, used verbatim.
        force_options_yaml.append(value)
    def add_force_option_file(option, opt, value, parser):
        # Whole YAML file; yaml.safe_load() accepts the bytes as-is.
        with open(value, 'rb') as f:
            force_options_yaml.append(f.read())
    def add_force_option_define(option, opt, value, parser):
        # C compiler style define: NAME, NAME=VALUE, or NAME(ARGS)=VALUE
        # (the last form is emitted as a verbatim #define).
        defname, eq, defval = value.partition('=')
        if not eq:
            doc = { defname: True }
        else:
            defname, paren, defargs = defname.partition('(')
            if not paren:
                doc = { defname: defval }
            else:
                doc = { defname: { 'verbatim': '#define %s%s%s %s' % (defname, paren, defargs, defval) } }
        force_options_yaml.append(yaml.safe_dump(doc))
    def add_force_option_undefine(option, opt, value, parser):
        # C compiler style undefine: NAME only, '=' is not allowed.
        tmp = value.split('=')
        if len(tmp) == 1:
            doc = { tmp[0]: False }
        else:
            raise Exception('invalid option value: %r' % value)
        force_options_yaml.append(yaml.safe_dump(doc))

    # Fixup header lines from --fixup-line and --fixup-file, in order.
    fixup_header_lines = []
    def add_fixup_header_line(option, opt, value, parser):
        fixup_header_lines.append(value)
    def add_fixup_header_file(option, opt, value, parser):
        # Open in text mode so lines are str objects.  The previous binary
        # mode open broke the newline strip on Python 3 (indexing bytes
        # yields an int, never equal to '\n') and later mixed bytes into
        # the '\n'.join() of fixup_header_lines.
        with open(value, 'r') as f:
            for line in f:
                if line.endswith('\n'):
                    line = line[:-1]
                fixup_header_lines.append(line)

    # Log level options.
    parser.add_option('--quiet', dest='quiet', action='store_true', default=False, help='Suppress info messages (show warnings)')
    parser.add_option('--verbose', dest='verbose', action='store_true', default=False, help='Show verbose debug messages')

    # Options for configure.py tool itself.
    parser.add_option('--source-directory', dest='source_directory', default=None, help='Directory with raw input sources (defaulted based on configure.py script path)')
    parser.add_option('--output-directory', dest='output_directory', default=None, help='Directory for output files (created automatically if it doesn\'t exist, reused if safe)')
    parser.add_option('--license-file', dest='license_file', default=None, help='Source for LICENSE.txt (defaulted based on configure.py script path)')
    parser.add_option('--authors-file', dest='authors_file', default=None, help='Source for AUTHORS.rst (defaulted based on configure.py script path)')
    parser.add_option('--git-commit', dest='git_commit', default=None, help='Force git commit hash')
    parser.add_option('--git-describe', dest='git_describe', default=None, help='Force git describe')
    parser.add_option('--git-branch', dest='git_branch', default=None, help='Force git branch name')
    parser.add_option('--duk-dist-meta', dest='duk_dist_meta', default=None, help='duk_dist_meta.json to read git commit etc info from')

    # Options for combining sources.
    parser.add_option('--separate-sources', dest='separate_sources', action='store_true', default=False, help='Output separate sources instead of combined source (default is combined)')
    parser.add_option('--line-directives', dest='line_directives', action='store_true', default=False, help='Output #line directives in combined source (default is false)')

    # Options forwarded to genbuiltins.py.
    parser.add_option('--rom-support', dest='rom_support', action='store_true', help='Add support for ROM strings/objects (increases duktape.c size considerably)')
    parser.add_option('--rom-auto-lightfunc', dest='rom_auto_lightfunc', action='store_true', default=False, help='Convert ROM built-in function properties into lightfuncs automatically whenever possible')
    parser.add_option('--user-builtin-metadata', dest='obsolete_builtin_metadata', default=None, help=optparse.SUPPRESS_HELP)
    parser.add_option('--builtin-file', dest='builtin_files', metavar='FILENAME', action='append', default=[], help='Built-in string/object YAML metadata to be applied over default built-ins (multiple files may be given, applied in sequence)')

    # Options for Unicode.
    parser.add_option('--unicode-data', dest='unicode_data', default=None, help='Provide custom UnicodeData.txt')
    parser.add_option('--special-casing', dest='special_casing', default=None, help='Provide custom SpecialCasing.txt')

    # Options for genconfig.py.
    parser.add_option('--config-metadata', dest='config_metadata', default=None, help='metadata directory (defaulted based on configure.py script path)')
    parser.add_option('--platform', dest='platform', default=None, help='platform (default is autodetect)')
    parser.add_option('--compiler', dest='compiler', default=None, help='compiler (default is autodetect)')
    parser.add_option('--architecture', dest='architecture', default=None, help='architecture (default is autodetect)')
    parser.add_option('--c99-types-only', dest='c99_types_only', action='store_true', default=False, help='assume C99 types, no legacy type detection')
    parser.add_option('--dll', dest='dll', action='store_true', default=False, help='dll build of Duktape, affects symbol visibility macros especially on Windows')
    parser.add_option('--support-feature-options', dest='support_feature_options', action='store_true', default=False, help=optparse.SUPPRESS_HELP)
    parser.add_option('--emit-legacy-feature-check', dest='emit_legacy_feature_check', action='store_true', default=False, help='emit preprocessor checks to reject legacy feature options (DUK_OPT_xxx)')
    parser.add_option('--emit-config-sanity-check', dest='emit_config_sanity_check', action='store_true', default=False, help='emit preprocessor checks for config option consistency (DUK_USE_xxx)')
    parser.add_option('--omit-removed-config-options', dest='omit_removed_config_options', action='store_true', default=False, help='omit removed config options from generated headers')
    parser.add_option('--omit-deprecated-config-options', dest='omit_deprecated_config_options', action='store_true', default=False, help='omit deprecated config options from generated headers')
    parser.add_option('--omit-unused-config-options', dest='omit_unused_config_options', action='store_true', default=False, help='omit unused config options from generated headers')
    parser.add_option('--add-active-defines-macro', dest='add_active_defines_macro', action='store_true', default=False, help='add DUK_ACTIVE_DEFINES macro, for development only')
    parser.add_option('--define', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_define, default=force_options_yaml, help='force #define option using a C compiler like syntax, e.g. "--define DUK_USE_DEEP_C_STACK" or "--define DUK_USE_TRACEBACK_DEPTH=10"')
    parser.add_option('-D', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_define, default=force_options_yaml, help='synonym for --define, e.g. "-DDUK_USE_DEEP_C_STACK" or "-DDUK_USE_TRACEBACK_DEPTH=10"')
    parser.add_option('--undefine', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_undefine, default=force_options_yaml, help='force #undef option using a C compiler like syntax, e.g. "--undefine DUK_USE_DEEP_C_STACK"')
    parser.add_option('-U', type='string', metavar='OPTION', dest='force_options_yaml', action='callback', callback=add_force_option_undefine, default=force_options_yaml, help='synonym for --undefine, e.g. "-UDUK_USE_DEEP_C_STACK"')
    parser.add_option('--option-yaml', type='string', metavar='YAML', dest='force_options_yaml', action='callback', callback=add_force_option_yaml, default=force_options_yaml, help='force option(s) using inline YAML (e.g. --option-yaml "DUK_USE_DEEP_C_STACK: true")')
    parser.add_option('--option-file', type='string', metavar='FILENAME', dest='force_options_yaml', action='callback', callback=add_force_option_file, default=force_options_yaml, help='YAML file(s) providing config option overrides')
    parser.add_option('--fixup-file', type='string', metavar='FILENAME', dest='fixup_header_lines', action='callback', callback=add_fixup_header_file, default=fixup_header_lines, help='C header snippet file(s) to be appended to generated header, useful for manual option fixups')
    parser.add_option('--fixup-line', type='string', metavar='LINE', dest='fixup_header_lines', action='callback', callback=add_fixup_header_line, default=fixup_header_lines, help='C header fixup line to be appended to generated header (e.g. --fixup-line "#define DUK_USE_FASTINT")')
    parser.add_option('--sanity-warning', dest='sanity_strict', action='store_false', default=True, help='emit a warning instead of #error for option sanity check issues')
    parser.add_option('--use-cpp-warning', dest='use_cpp_warning', action='store_true', default=False, help='emit a (non-portable) #warning when appropriate')
    parser.add_option('--nodejs-command', dest='nodejs_command', default=None, help='Force Node.js command name')

    # http://stackoverflow.com/questions/4934806/how-can-i-find-scripts-directory-with-python
    script_path = sys.path[0]

    (opts, args) = parser.parse_args()
    if len(args) > 0:
        raise Exception('unexpected arguments: %r' % args)
    if opts.obsolete_builtin_metadata is not None:
        raise Exception('--user-builtin-metadata has been removed, use --builtin-file instead')

    # Resolve the Node.js command: forced via option, otherwise autodetected.
    if opts.nodejs_command is None:
        nodejs_command = detect_nodejs()
    else:
        nodejs_command = opts.nodejs_command
    if nodejs_command is None:
        raise Exception('failed to detect Node.js, override with --nodejs-command')

    # Locate the JS tooling entry point relative to this script.
    duktool_path = None
    for fn in [
        os.path.join(script_path, 'duktool.js'),
        os.path.join(script_path, '..', 'src-tools', 'index.js'),
        os.path.join(script_path, '..', 'src-tools', 'duktool.js')
    ]:
        if os.path.isfile(fn):
            duktool_path = fn
            break
    if duktool_path is None:
        raise Exception('could not find duktool.js or src-tools/index.js')

    cmd = [
        nodejs_command,
        duktool_path,
        'configure'
    ]
    if opts.output_directory is not None:
        cmd += [ '--output-directory', opts.output_directory ]
    if opts.source_directory is not None:
        cmd += [ '--source-directory', opts.source_directory ]
    else:
        # Default the source directory based on the script location.
        src_dir = os.path.join(script_path, '..', 'src-input')
        if os.path.isdir(src_dir) and os.path.isfile(os.path.join(src_dir, 'duktape.h.in')):
            cmd += [ '--source-directory', src_dir ]
    if opts.config_metadata is not None:
        cmd += [ '--config-directory', opts.config_metadata ]

    opts_fn = None
    fixup_fn = None
    try:
        # Merge all forced option documents; later sources override earlier
        # ones, matching command line order.
        forced = {}
        for i in force_options_yaml:
            doc = yaml.safe_load(i)
            if doc is None:
                continue  # Empty YAML input contributes nothing.
            for k, v in doc.items():
                forced[k] = v
        opts_fd, opts_fn = tempfile.mkstemp()
        with os.fdopen(opts_fd, 'wb') as f:
            f.write(yaml.safe_dump(forced).encode('utf-8'))
        cmd += [ '--option-file', opts_fn ]

        # All fixup lines go into a single temporary snippet file.
        fixup_fd, fixup_fn = tempfile.mkstemp()
        with os.fdopen(fixup_fd, 'wb') as f:
            f.write(('\n'.join(fixup_header_lines) + '\n').encode('utf-8'))
        cmd += [ '--fixup-file', fixup_fn ]

        for i in opts.builtin_files:
            cmd += [ '--builtin-file', i ]
        if opts.line_directives:
            cmd += [ '--line-directives' ]
        if opts.platform is not None:
            cmd += [ '--platform', opts.platform ]
        if opts.compiler is not None:
            cmd += [ '--compiler', opts.compiler ]
        if opts.architecture is not None:
            cmd += [ '--architecture', opts.architecture ]
        if opts.dll:
            cmd += [ '--dll' ]
        if opts.c99_types_only:
            cmd += [ '--c99-types-only' ]

        sys.stderr.write('*** Executing JS-based tooling with command: ' + repr(cmd) + '\n\n')
        subprocess.check_call(cmd)
    finally:
        # Remove the temporary files; the original implementation leaked
        # them.  By this point the subprocess has consumed them.
        for fn in (opts_fn, fixup_fn):
            if fn is not None:
                try:
                    os.unlink(fn)
                except OSError:
                    pass
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
# ---------------------------------------------------------------------------
# NOTE: the content below belongs to a separate module (Nova block device
# tests) that was concatenated onto this file; the original separator was a
# stray '|' character, which is not valid Python.
# ---------------------------------------------------------------------------
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
import six
from nova import block_device
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.tests import uuidsentinel as uuids
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
    """Tests for the driver block device wrappers in nova.virt.block_device.

    For each source type ('swap', 'ephemeral', 'volume', 'snapshot',
    'image', 'blank') three class-level fixtures are defined:

    * ``<type>_bdm_dict``: raw DB-style dict used to build the BDM object,
    * ``<type>_driver_bdm``: expected driver-facing representation,
    * ``<type>_legacy_driver_bdm``: expected legacy-format representation.
    """

    # Source type name -> driver wrapper class under test.
    driver_classes = {
        'swap': driver_block_device.DriverSwapBlockDevice,
        'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
        'volume': driver_block_device.DriverVolumeBlockDevice,
        'snapshot': driver_block_device.DriverSnapshotBlockDevice,
        'image': driver_block_device.DriverImageBlockDevice,
        'blank': driver_block_device.DriverBlankBlockDevice
    }

    # swap: blank source, local destination, guest_format 'swap'.
    swap_bdm_dict = block_device.BlockDeviceDict(
        {'id': 1, 'instance_uuid': uuids.instance,
         'device_name': '/dev/sdb1',
         'source_type': 'blank',
         'destination_type': 'local',
         'delete_on_termination': True,
         'guest_format': 'swap',
         'disk_bus': 'scsi',
         'volume_size': 2,
         'boot_index': -1})

    swap_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2,
        'disk_bus': 'scsi'}

    swap_legacy_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2}

    # ephemeral: blank source, local destination, formatted ext4.
    ephemeral_bdm_dict = block_device.BlockDeviceDict(
        {'id': 2, 'instance_uuid': uuids.instance,
         'device_name': '/dev/sdc1',
         'source_type': 'blank',
         'destination_type': 'local',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 4,
         'guest_format': 'ext4',
         'delete_on_termination': True,
         'boot_index': -1})

    ephemeral_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'device_type': 'disk',
        'guest_format': 'ext4',
        'disk_bus': 'scsi'}

    ephemeral_legacy_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'virtual_name': 'ephemeral0',
        'num': 0}

    # volume: boot volume (boot_index 0) with pre-set connection_info.
    volume_bdm_dict = block_device.BlockDeviceDict(
        {'id': 3, 'instance_uuid': uuids.instance,
         'device_name': '/dev/sda1',
         'source_type': 'volume',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 8,
         'destination_type': 'volume',
         'volume_id': 'fake-volume-id-1',
         'guest_format': 'ext4',
         'connection_info': '{"fake": "connection_info"}',
         'delete_on_termination': False,
         'boot_index': 0})

    volume_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': 'ext4',
        'boot_index': 0}

    volume_legacy_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False}

    # snapshot: volume created from a snapshot.
    snapshot_bdm_dict = block_device.BlockDeviceDict(
        {'id': 4, 'instance_uuid': uuids.instance,
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    snapshot_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    snapshot_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    # image: volume created from a glance image.
    image_bdm_dict = block_device.BlockDeviceDict(
        {'id': 5, 'instance_uuid': uuids.instance,
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 1,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'image',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'image_id': 'fake-image-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    image_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    image_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    # blank: empty volume created on the fly.
    blank_bdm_dict = block_device.BlockDeviceDict(
        {'id': 6, 'instance_uuid': uuids.instance,
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'blank',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    blank_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    blank_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
self.context, self.volume_bdm_dict)
self.snapshot_bdm = fake_block_device.fake_bdm_object(
self.context, self.snapshot_bdm_dict)
self.image_bdm = fake_block_device.fake_bdm_object(
self.context, self.image_bdm_dict)
self.blank_bdm = fake_block_device.fake_bdm_object(
self.context, self.blank_bdm_dict)
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
bdm = fake_block_device.fake_bdm_object(
self.context, {'no_device': True})
self.assertRaises(driver_block_device._NotTransformable,
cls, bdm)
    def _test_driver_device(self, name):
        """Generic checks for one driver BDM type.

        Verifies: the driver dict matches the expected fixture, field
        values proxy through to the underlying BDM object, the legacy
        format matches, passthru attributes proxy correctly, other driver
        classes reject this BDM, and save() pushes updated values to the
        underlying object (and is a no-op when nothing changed).
        """
        db_bdm = getattr(self, "%s_bdm" % name)
        test_bdm = self.driver_classes[name](db_bdm)
        self.assertThat(test_bdm, matchers.DictMatches(
            getattr(self, "%s_driver_bdm" % name)))

        # Every field on the DB BDM must be reflected on the wrapped object.
        for k, v in six.iteritems(db_bdm):
            field_val = getattr(test_bdm._bdm_obj, k)
            if isinstance(field_val, bool):
                v = bool(v)
            self.assertEqual(field_val, v)

        self.assertThat(test_bdm.legacy(),
                        matchers.DictMatches(
                            getattr(self, "%s_legacy_driver_bdm" % name)))

        # Test passthru attributes
        for passthru in test_bdm._proxy_as_attr:
            self.assertEqual(getattr(test_bdm, passthru),
                             getattr(test_bdm._bdm_obj, passthru))

        # Make sure that all others raise _invalidType
        for other_name, cls in six.iteritems(self.driver_classes):
            if other_name == name:
                continue
            self.assertRaises(driver_block_device._InvalidType,
                              cls,
                              getattr(self, '%s_bdm' % name))

        # Test the save method
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            for fld, alias in six.iteritems(test_bdm._update_on_save):
                # We can't set fake values on enums, like device_type,
                # so skip those.
                if not isinstance(test_bdm._bdm_obj.fields[fld],
                                  fields.BaseEnumField):
                    test_bdm[alias or fld] = 'fake_changed_value'
            test_bdm.save()
            # Each updated driver-dict value must land on the BDM object.
            for fld, alias in six.iteritems(test_bdm._update_on_save):
                self.assertEqual(test_bdm[alias or fld],
                                 getattr(test_bdm._bdm_obj, fld))

            save_mock.assert_called_once_with()

        def check_save():
            # Runs inside save(): no fields may be dirty at that point.
            self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed())

        # Test that nothing is set on the object if there are no actual changes
        test_bdm._bdm_obj.obj_reset_changes()
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            save_mock.side_effect = check_save
            test_bdm.save()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
    def test_driver_swap_block_device(self):
        # Swap device transformation and legacy transformation.
        self._test_driver_device("swap")

    def test_driver_swap_default_size(self):
        # Missing/None volume_size defaults to swap_size 0.
        self._test_driver_default_size('swap')

    def test_driver_ephemeral_block_device(self):
        # Ephemeral device transformation and legacy transformation.
        self._test_driver_device("ephemeral")

    def test_driver_ephemeral_default_size(self):
        # Missing/None volume_size defaults to size 0.
        self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm_dict.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'],
fake_block_device.fake_bdm_object(self.context, bdm))
def test_driver_blank_block_device(self):
self._test_driver_device('blank')
test_bdm = self.driver_classes['blank'](
self.blank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
    def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
        """Check _call_wait_func error handling.

        The wait function always raises VolumeNotCreated here; depending
        on delete_on_termination the volume delete must (not) be invoked,
        and a failing delete must not mask the original exception.
        """
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        test_bdm['delete_on_termination'] = delete_on_termination
        with mock.patch.object(self.volume_api, 'delete') as vol_delete:
            wait_func = mock.MagicMock()
            mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
                                                        seconds=1,
                                                        attempts=1,
                                                        volume_status='error')
            wait_func.side_effect = mock_exception

            if delete_on_termination and delete_fail:
                # Simulate the cleanup delete itself blowing up.
                vol_delete.side_effect = Exception()

            # The original VolumeNotCreated must propagate in all cases.
            self.assertRaises(exception.VolumeNotCreated,
                              test_bdm._call_wait_func,
                              context=self.context,
                              wait_func=wait_func,
                              volume_api=self.volume_api,
                              volume_id='fake-id')
            self.assertEqual(delete_on_termination, vol_delete.called)
    def test_call_wait_delete_volume(self):
        # delete_on_termination=True: the volume is deleted on failure.
        self._test_call_wait_func(True)

    def test_call_wait_delete_volume_fail(self):
        # A failing volume delete must not mask VolumeNotCreated.
        self._test_call_wait_func(True, True)

    def test_call_wait_no_delete_volume(self):
        # delete_on_termination=False: the volume is left in place.
        self._test_call_wait_func(False)
    def _test_volume_attach(self, driver_bdm, bdm_dict,
                            fake_volume, check_attach=True,
                            fail_check_attach=False, driver_attach=False,
                            fail_driver_attach=False, volume_attach=True,
                            fail_volume_attach=False, access_mode='rw',
                            availability_zone=None):
        """Record the expected mox call sequence for a volume attach.

        The flags select which stages run (check_attach, driver attach,
        volume attach) and which one is simulated to fail.  Recording
        stops (early return) at the point where the production code is
        expected to bail out.  Returns (instance, expected_conn_info);
        callers must call self.mox.ReplayAll() before exercising attach().
        """
        # Pin elevated() so the recorded calls match the exact context
        # object the production code will use.
        elevated_context = self.context.elevated()
        self.stubs.Set(self.context, 'elevated',
                       lambda: elevated_context)
        self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
        self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
        instance_detail = {'id': '123', 'uuid': uuids.uuid,
                           'availability_zone': availability_zone}
        instance = fake_instance.fake_instance_obj(self.context,
                                                   **instance_detail)
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'access_mode': access_mode}}
        # attach() is expected to add the volume id as 'serial'.
        expected_conn_info = {'data': {'access_mode': access_mode},
                              'serial': fake_volume['id']}
        enc_data = {'fake': 'enc_data'}

        self.volume_api.get(self.context,
                            fake_volume['id']).AndReturn(fake_volume)
        if check_attach:
            if not fail_check_attach:
                self.volume_api.check_attach(self.context, fake_volume,
                                             instance=instance).AndReturn(None)
            else:
                # check_attach failure: only a save is expected afterwards.
                self.volume_api.check_attach(self.context, fake_volume,
                                             instance=instance).AndRaise(
                                                 test.TestingException)
                driver_bdm._bdm_obj.save().AndReturn(None)
                return instance, expected_conn_info

        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            elevated_context, fake_volume['id'],
            connector).AndReturn(connection_info)
        if driver_attach:
            encryptors.get_encryption_metadata(
                elevated_context, self.volume_api, fake_volume['id'],
                connection_info).AndReturn(enc_data)
            if not fail_driver_attach:
                self.virt_driver.attach_volume(
                    elevated_context, expected_conn_info, instance,
                    bdm_dict['device_name'],
                    disk_bus=bdm_dict['disk_bus'],
                    device_type=bdm_dict['device_type'],
                    encryption=enc_data).AndReturn(None)
            else:
                # Driver attach failure: connection is rolled back.
                self.virt_driver.attach_volume(
                    elevated_context, expected_conn_info, instance,
                    bdm_dict['device_name'],
                    disk_bus=bdm_dict['disk_bus'],
                    device_type=bdm_dict['device_type'],
                    encryption=enc_data).AndRaise(test.TestingException)
                self.volume_api.terminate_connection(
                    elevated_context, fake_volume['id'],
                    connector).AndReturn(None)
                driver_bdm._bdm_obj.save().AndReturn(None)
                return instance, expected_conn_info

        if volume_attach:
            driver_bdm._bdm_obj.save().AndReturn(None)
            if not fail_volume_attach:
                self.volume_api.attach(elevated_context, fake_volume['id'],
                                       uuids.uuid, bdm_dict['device_name'],
                                       mode=access_mode).AndReturn(None)
            else:
                # Cinder attach failure: full rollback (driver detach only
                # if the driver attach stage ran), then detach in Cinder.
                self.volume_api.attach(elevated_context, fake_volume['id'],
                                       uuids.uuid, bdm_dict['device_name'],
                                       mode=access_mode).AndRaise(
                                           test.TestingException)
                if driver_attach:
                    self.virt_driver.detach_volume(
                        expected_conn_info, instance,
                        bdm_dict['device_name'],
                        encryption=enc_data).AndReturn(None)
                self.volume_api.terminate_connection(
                    elevated_context, fake_volume['id'],
                    connector).AndReturn(None)
                self.volume_api.detach(elevated_context,
                                       fake_volume['id']).AndReturn(None)
        driver_bdm._bdm_obj.save().AndReturn(None)
        return instance, expected_conn_info
    def test_volume_attach(self):
        # Happy path: attach populates connection_info including 'serial'.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume)

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def test_volume_attach_ro(self):
        # Read-only attach: access_mode 'ro' is carried into the
        # connection info and the Cinder attach call.
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, access_mode='ro')

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def test_volume_attach_update_size(self):
        # A BDM without a size picks up the actual volume size from
        # the Cinder volume during attach.
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        test_bdm.volume_size = None
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached',
                  'size': 42}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume)

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertEqual(expected_conn_info, test_bdm['connection_info'])
        self.assertEqual(42, test_bdm.volume_size)
    def test_volume_attach_check_attach_fails(self):
        # check_attach raising must abort the attach and propagate.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1'}

        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, fail_check_attach=True)
        self.mox.ReplayAll()

        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
                          instance, self.volume_api, self.virt_driver)

    def test_volume_no_volume_attach(self):
        # do_check_attach=False and do_driver_attach=False: neither
        # check_attach nor the hypervisor attach is called, but the
        # connection info is still set up.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=False)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=False)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def test_volume_attach_no_check_driver_attach(self):
        # do_check_attach=False with do_driver_attach=True: hypervisor
        # attach happens without a prior check_attach.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=True)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=True)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_volume_attach_driver_attach_fails(self):
        # Hypervisor attach failure: connection is terminated and the
        # exception propagates.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1'}

        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, driver_attach=True,
            fail_driver_attach=True)
        self.mox.ReplayAll()

        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
                          instance, self.volume_api, self.virt_driver,
                          do_driver_attach=True)

    def test_volume_attach_volume_attach_fails(self):
        # Cinder attach failure after a successful driver attach: full
        # rollback (driver detach, terminate connection, Cinder detach).
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, driver_attach=True,
            fail_volume_attach=True)
        self.mox.ReplayAll()

        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
                          instance, self.volume_api, self.virt_driver,
                          do_driver_attach=True)

    def test_volume_attach_no_driver_attach_volume_attach_fails(self):
        # Cinder attach failure without a driver attach: rollback skips
        # the hypervisor detach.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, fail_volume_attach=True)
        self.mox.ReplayAll()

        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
                          instance, self.volume_api, self.virt_driver,
                          do_driver_attach=False)
    def test_refresh_connection(self):
        """refresh_connection_info() re-initializes the connection and
        stores the new info (plus serial) on the BDM."""
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)

        instance = {'id': 'fake_id', 'uuid': uuids.uuid}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
        # The volume id is expected to be added as 'serial'.
        expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
                              'serial': 'fake-volume-id-2'}

        self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')

        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            self.context, test_bdm.volume_id,
            connector).AndReturn(connection_info)
        test_bdm._bdm_obj.save().AndReturn(None)
        self.mox.ReplayAll()

        test_bdm.refresh_connection_info(self.context, instance,
                                         self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_snapshot_attach_no_volume(self):
        """A snapshot BDM without a volume creates one from the snapshot,
        waits for it, then attaches it; volume_id is recorded."""
        no_volume_snapshot = self.snapshot_bdm_dict.copy()
        no_volume_snapshot['volume_id'] = None
        test_bdm = self.driver_classes['snapshot'](
            fake_block_device.fake_bdm_object(
                self.context, no_volume_snapshot))

        snapshot = {'id': 'fake-volume-id-1',
                    'attach_status': 'detached'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}

        wait_func = self.mox.CreateMockAnything()

        self.volume_api.get_snapshot(self.context,
                                     'fake-snapshot-id-1').AndReturn(snapshot)
        self.volume_api.create(self.context, 3, '', '', snapshot,
                               availability_zone=None).AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_snapshot, volume)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self):
        # Tests that the volume created from the snapshot has the same AZ as
        # the instance (cinder.cross_az_attach=False).
        self.flags(cross_az_attach=False, group='cinder')
        no_volume_snapshot = self.snapshot_bdm_dict.copy()
        no_volume_snapshot['volume_id'] = None
        test_bdm = self.driver_classes['snapshot'](
            fake_block_device.fake_bdm_object(
                self.context, no_volume_snapshot))

        snapshot = {'id': 'fake-volume-id-1',
                    'attach_status': 'detached'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}

        wait_func = self.mox.CreateMockAnything()

        self.volume_api.get_snapshot(self.context,
                                     'fake-snapshot-id-1').AndReturn(snapshot)
        # The create call must carry the instance's availability zone.
        self.volume_api.create(self.context, 3, '', '', snapshot,
                               availability_zone='test-az').AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_snapshot, volume,
            availability_zone='test-az')
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_snapshot_attach_fail_volume(self):
    # If waiting for the volume created from the snapshot fails, the
    # half-created volume must be deleted and the error re-raised.
    fail_volume_snapshot = self.snapshot_bdm_dict.copy()
    fail_volume_snapshot['volume_id'] = None
    test_bdm = self.driver_classes['snapshot'](
        fake_block_device.fake_bdm_object(
            self.context, fail_volume_snapshot))
    snapshot = {'id': 'fake-volume-id-1',
                'attach_status': 'detached'}
    volume = {'id': 'fake-volume-id-2',
              'attach_status': 'detached'}
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                               **{'uuid': uuids.uuid})
    with test.nested(
        mock.patch.object(self.volume_api, 'get_snapshot',
                          return_value=snapshot),
        mock.patch.object(self.volume_api, 'create', return_value=volume),
        mock.patch.object(self.volume_api, 'delete'),
    ) as (vol_get_snap, vol_create, vol_delete):
        wait_func = mock.MagicMock()
        # Simulate the volume never becoming available.
        mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
                                                    seconds=1,
                                                    attempts=1,
                                                    volume_status='error')
        wait_func.side_effect = mock_exception
        self.assertRaises(exception.VolumeNotCreated,
                          test_bdm.attach, context=self.context,
                          instance=instance,
                          volume_api=self.volume_api,
                          virt_driver=self.virt_driver,
                          wait_func=wait_func)
        vol_get_snap.assert_called_once_with(
            self.context, 'fake-snapshot-id-1')
        vol_create.assert_called_once_with(
            self.context, 3, '', '', snapshot, availability_zone=None)
        # Rollback: the failed volume is cleaned up.
        vol_delete.assert_called_once_with(self.context, volume['id'])
def test_snapshot_attach_volume(self):
    # When the snapshot BDM already has a volume_id, attach must delegate
    # straight to the volume driver and never touch the snapshot/create
    # cinder APIs.
    test_bdm = self.driver_classes['snapshot'](
        self.snapshot_bdm)
    instance = {'id': 'fake_id', 'uuid': uuids.uuid}
    volume_class = self.driver_classes['volume']
    self.mox.StubOutWithMock(volume_class, 'attach')
    # Make sure these are not called
    self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
    self.mox.StubOutWithMock(self.volume_api, 'create')
    volume_class.attach(self.context, instance, self.volume_api,
                        self.virt_driver, do_check_attach=True
                        ).AndReturn(None)
    self.mox.ReplayAll()
    test_bdm.attach(self.context, instance, self.volume_api,
                    self.virt_driver)
    self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
    # A volume is created from the image and then attached; AZ is None
    # because cinder.cross_az_attach defaults to True.
    no_volume_image = self.image_bdm_dict.copy()
    no_volume_image['volume_id'] = None
    test_bdm = self.driver_classes['image'](
        fake_block_device.fake_bdm_object(
            self.context, no_volume_image))
    image = {'id': 'fake-image-id-1'}
    volume = {'id': 'fake-volume-id-2',
              'attach_status': 'detached'}
    wait_func = self.mox.CreateMockAnything()
    self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
                           availability_zone=None).AndReturn(volume)
    wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
    instance, expected_conn_info = self._test_volume_attach(
        test_bdm, no_volume_image, volume)
    self.mox.ReplayAll()
    test_bdm.attach(self.context, instance, self.volume_api,
                    self.virt_driver, wait_func)
    # The BDM records the id of the newly created volume.
    self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume_cinder_cross_az_attach_false(self):
    # Tests that the volume created from the image has the same AZ as the
    # instance.
    self.flags(cross_az_attach=False, group='cinder')
    no_volume_image = self.image_bdm_dict.copy()
    no_volume_image['volume_id'] = None
    test_bdm = self.driver_classes['image'](
        fake_block_device.fake_bdm_object(
            self.context, no_volume_image))
    image = {'id': 'fake-image-id-1'}
    volume = {'id': 'fake-volume-id-2',
              'attach_status': 'detached'}
    wait_func = self.mox.CreateMockAnything()
    # With cross_az_attach disabled the volume must be created in the
    # instance's AZ ('test-az').
    self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
                           availability_zone='test-az').AndReturn(volume)
    wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
    instance, expected_conn_info = self._test_volume_attach(
        test_bdm, no_volume_image, volume,
        availability_zone='test-az')
    self.mox.ReplayAll()
    test_bdm.attach(self.context, instance, self.volume_api,
                    self.virt_driver, wait_func)
    self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_image_attach_fail_volume(self):
    """A failed image-backed volume creation rolls back (deletes) the
    volume and re-raises the wait error."""
    failing_bdm_dict = self.image_bdm_dict.copy()
    failing_bdm_dict['volume_id'] = None
    test_bdm = self.driver_classes['image'](
        fake_block_device.fake_bdm_object(self.context, failing_bdm_dict))
    image = {'id': 'fake-image-id-1'}
    volume = {'id': 'fake-volume-id-2',
              'attach_status': 'detached'}
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                               **{'uuid': uuids.uuid})
    with mock.patch.object(self.volume_api, 'create',
                           return_value=volume) as vol_create:
        with mock.patch.object(self.volume_api, 'delete') as vol_delete:
            # Simulate the volume never becoming available.
            wait_func = mock.MagicMock(
                side_effect=exception.VolumeNotCreated(
                    volume_id=volume['id'], seconds=1, attempts=1,
                    volume_status='error'))
            self.assertRaises(exception.VolumeNotCreated,
                              test_bdm.attach, context=self.context,
                              instance=instance,
                              volume_api=self.volume_api,
                              virt_driver=self.virt_driver,
                              wait_func=wait_func)
            vol_create.assert_called_once_with(
                self.context, 1, '', '', image_id=image['id'],
                availability_zone=None)
            # Rollback: the failed volume is cleaned up.
            vol_delete.assert_called_once_with(self.context, volume['id'])
def test_image_attach_volume(self):
    # When the image BDM already has a volume_id, attach must delegate
    # straight to the volume driver and never call the cinder APIs.
    test_bdm = self.driver_classes['image'](
        self.image_bdm)
    instance = {'id': 'fake_id', 'uuid': uuids.uuid}
    volume_class = self.driver_classes['volume']
    self.mox.StubOutWithMock(volume_class, 'attach')
    # Make sure these are not called
    self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
    self.mox.StubOutWithMock(self.volume_api, 'create')
    volume_class.attach(self.context, instance, self.volume_api,
                        self.virt_driver, do_check_attach=True
                        ).AndReturn(None)
    self.mox.ReplayAll()
    test_bdm.attach(self.context, instance, self.volume_api,
                    self.virt_driver)
    self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_fail_volume(self):
    """A failed blank-volume creation deletes the volume and re-raises."""
    bdm_dict = self.blank_bdm_dict.copy()
    bdm_dict['volume_id'] = None
    test_bdm = self.driver_classes['blank'](
        fake_block_device.fake_bdm_object(self.context, bdm_dict))
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                               **{'uuid': uuids.uuid})
    volume = {'id': 'fake-volume-id-2',
              'display_name': '%s-blank-vol' % uuids.uuid}
    with mock.patch.object(self.volume_api, 'create',
                           return_value=volume) as vol_create:
        with mock.patch.object(self.volume_api, 'delete') as vol_delete:
            # Simulate the volume never becoming available.
            wait_func = mock.MagicMock(
                side_effect=exception.VolumeNotCreated(
                    volume_id=volume['id'], seconds=1, attempts=1,
                    volume_status='error'))
            self.assertRaises(exception.VolumeNotCreated,
                              test_bdm.attach, context=self.context,
                              instance=instance,
                              volume_api=self.volume_api,
                              virt_driver=self.virt_driver,
                              wait_func=wait_func)
            vol_create.assert_called_once_with(
                self.context, test_bdm.volume_size,
                '%s-blank-vol' % uuids.uuid,
                '', availability_zone=None)
            # Rollback: the failed volume is cleaned up.
            vol_delete.assert_called_once_with(
                self.context, volume['id'])
def test_blank_attach_volume(self):
    # A blank volume named "<instance uuid>-blank-vol" is created and
    # then attached via the volume driver.
    no_blank_volume = self.blank_bdm_dict.copy()
    no_blank_volume['volume_id'] = None
    test_bdm = self.driver_classes['blank'](
        fake_block_device.fake_bdm_object(
            self.context, no_blank_volume))
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                               **{'uuid': uuids.uuid})
    volume_class = self.driver_classes['volume']
    volume = {'id': 'fake-volume-id-2',
              'display_name': '%s-blank-vol' % uuids.uuid}
    with test.nested(
        mock.patch.object(self.volume_api, 'create', return_value=volume),
        mock.patch.object(volume_class, 'attach')
    ) as (vol_create, vol_attach):
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        # AZ is None because cinder.cross_az_attach defaults to True.
        vol_create.assert_called_once_with(
            self.context, test_bdm.volume_size,
            '%s-blank-vol' % uuids.uuid,
            '', availability_zone=None)
        vol_attach.assert_called_once_with(self.context, instance,
                                           self.volume_api,
                                           self.virt_driver,
                                           do_check_attach=True)
        self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_blank_attach_volume_cinder_cross_az_attach_false(self):
    """The created blank volume lands in the instance's AZ when
    cinder.cross_az_attach is disabled."""
    self.flags(cross_az_attach=False, group='cinder')
    bdm_dict = self.blank_bdm_dict.copy()
    bdm_dict['volume_id'] = None
    test_bdm = self.driver_classes['blank'](
        fake_block_device.fake_bdm_object(self.context, bdm_dict))
    instance = fake_instance.fake_instance_obj(
        mock.sentinel.ctx,
        **{'uuid': uuids.uuid, 'availability_zone': 'test-az'})
    volume_class = self.driver_classes['volume']
    volume = {'id': 'fake-volume-id-2',
              'display_name': '%s-blank-vol' % uuids.uuid}
    with mock.patch.object(self.volume_api, 'create',
                           return_value=volume) as vol_create:
        with mock.patch.object(volume_class, 'attach') as vol_attach:
            test_bdm.attach(self.context, instance, self.volume_api,
                            self.virt_driver)
            # The volume is created in the instance's AZ.
            vol_create.assert_called_once_with(
                self.context, test_bdm.volume_size,
                '%s-blank-vol' % uuids.uuid,
                '', availability_zone='test-az')
            vol_attach.assert_called_once_with(
                self.context, instance, self.volume_api,
                self.virt_driver, do_check_attach=True)
            self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
    # _convert_block_devices keeps only BDMs matching the driver class;
    # the ephemeral BDM must be filtered out.
    bdms = objects.BlockDeviceMappingList(
        objects=[self.volume_bdm, self.ephemeral_bdm])
    converted = driver_block_device._convert_block_devices(
        self.driver_classes['volume'], bdms)
    # assertEqual(expected, observed) — consistent with the argument
    # order used by the other assertions in this file.
    self.assertEqual([self.volume_driver_bdm], converted)
def test_convert_all_volumes(self):
    # convert_all_volumes converts every volume-backed BDM and silently
    # skips non-volume BDMs (the ephemeral here).
    self.assertEqual([], driver_block_device.convert_all_volumes())
    converted = driver_block_device.convert_all_volumes(
        self.volume_bdm, self.ephemeral_bdm, self.image_bdm,
        self.blank_bdm, self.snapshot_bdm)
    # assertEqual(expected, observed) — consistent with the argument
    # order used by the other assertions in this file.
    self.assertEqual([self.volume_driver_bdm,
                      self.image_driver_bdm,
                      self.blank_driver_bdm,
                      self.snapshot_driver_bdm],
                     converted)
def test_convert_volume(self):
    # Non-volume BDMs (swap) convert to None; volume and snapshot BDMs
    # convert to their driver BDM equivalents.
    self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm))
    self.assertEqual(self.volume_driver_bdm,
                     driver_block_device.convert_volume(self.volume_bdm))
    self.assertEqual(self.snapshot_driver_bdm,
                     driver_block_device.convert_volume(self.snapshot_bdm))
def test_legacy_block_devices(self):
    # legacy_block_devices converts driver BDMs to legacy dicts and
    # numbers ephemerals sequentially (ephemeral0, ephemeral1, ...).
    test_snapshot = self.driver_classes['snapshot'](
        self.snapshot_bdm)
    block_device_mapping = [test_snapshot, test_snapshot]
    legacy_bdm = driver_block_device.legacy_block_devices(
        block_device_mapping)
    # assertEqual(expected, observed) — consistent with the argument
    # order used by the other assertions in this file.
    self.assertEqual([self.snapshot_legacy_driver_bdm,
                      self.snapshot_legacy_driver_bdm], legacy_bdm)
    # Test that the ephemerals work as expected
    test_ephemerals = [self.driver_classes['ephemeral'](
        self.ephemeral_bdm) for _ in range(2)]
    expected = [self.ephemeral_legacy_driver_bdm.copy()
                for _ in range(2)]
    expected[0]['virtual_name'] = 'ephemeral0'
    expected[0]['num'] = 0
    expected[1]['virtual_name'] = 'ephemeral1'
    expected[1]['num'] = 1
    legacy_ephemerals = driver_block_device.legacy_block_devices(
        test_ephemerals)
    self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
    """get_swap returns the swap device from a BDM list, else None."""
    swap_bdm = self.swap_driver_bdm
    legacy_swap_bdm = self.swap_legacy_driver_bdm
    self.assertEqual(swap_bdm, driver_block_device.get_swap([swap_bdm]))
    self.assertEqual(legacy_swap_bdm,
                     driver_block_device.get_swap([legacy_swap_bdm]))
    # Lists without a swap device, and empty lists, yield None.
    self.assertIsNone(
        driver_block_device.get_swap([self.volume_driver_bdm]))
    self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
    """All standard BDM types are implemented; local images are not."""
    implemented_bdms = (self.image_bdm, self.volume_bdm, self.swap_bdm,
                        self.ephemeral_bdm, self.snapshot_bdm)
    for bdm in implemented_bdms:
        self.assertTrue(driver_block_device.is_implemented(bdm))
    # An image BDM with a 'local' destination has no driver class.
    local_image = dict(self.image_bdm_dict, destination_type='local')
    self.assertFalse(driver_block_device.is_implemented(
        fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
    """Only volume-backed driver BDMs count as block device mappings."""
    test_swap = self.driver_classes['swap'](self.swap_bdm)
    test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
    test_image = self.driver_classes['image'](self.image_bdm)
    test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
    test_volume = self.driver_classes['volume'](self.volume_bdm)
    test_blank = self.driver_classes['blank'](self.blank_bdm)
    volume_backed = (test_image, test_snapshot, test_volume, test_blank)
    non_volume = (test_swap, test_ephemeral)
    for driver_bdm in volume_backed:
        self.assertTrue(driver_block_device.is_block_device_mapping(
            driver_bdm._bdm_obj))
    for driver_bdm in non_volume:
        self.assertFalse(driver_block_device.is_block_device_mapping(
            driver_bdm._bdm_obj))
def test_get_volume_create_az_cinder_cross_az_attach_true(self):
    # Tests that we get None back if cinder.cross_az_attach=True even if
    # the instance has an AZ assigned. Note that since cross_az_attach
    # defaults to True we don't need to set a flag explicitly for the test.
    updates = {'availability_zone': 'test-az'}
    instance = fake_instance.fake_instance_obj(self.context, **updates)
    # None means "let cinder pick the AZ" on volume create.
    self.assertIsNone(
        driver_block_device._get_volume_create_az_value(instance))
def test_refresh_conn_infos(self):
    # Only DriverVolumeBlockDevice derived devices should refresh their
    # connection_info during a refresh_conn_infos call.
    test_volume = mock.MagicMock(
        spec=driver_block_device.DriverVolumeBlockDevice)
    test_image = mock.MagicMock(
        spec=driver_block_device.DriverImageBlockDevice)
    test_snapshot = mock.MagicMock(
        spec=driver_block_device.DriverSnapshotBlockDevice)
    test_blank = mock.MagicMock(
        spec=driver_block_device.DriverBlankBlockDevice)
    test_eph = mock.MagicMock(
        spec=driver_block_device.DriverEphemeralBlockDevice)
    test_swap = mock.MagicMock(
        spec=driver_block_device.DriverSwapBlockDevice)
    block_device_mapping = [test_volume, test_image, test_eph,
                            test_snapshot, test_swap, test_blank]
    driver_block_device.refresh_conn_infos(block_device_mapping,
                                           mock.sentinel.refresh_context,
                                           mock.sentinel.refresh_instance,
                                           mock.sentinel.refresh_vol_api,
                                           mock.sentinel.refresh_virt_drv)
    # Each volume-backed device is refreshed exactly once with the same
    # arguments that were passed to refresh_conn_infos.
    for test_mock in [test_volume, test_image, test_snapshot, test_blank]:
        test_mock.refresh_connection_info.assert_called_once_with(
            mock.sentinel.refresh_context,
            mock.sentinel.refresh_instance,
            mock.sentinel.refresh_vol_api,
            mock.sentinel.refresh_virt_drv)
    # NOTE(lyarwood): Can't think of a better way of testing this as we
    # can't assert_not_called if the method isn't in the spec.
    self.assertFalse(hasattr(test_eph, 'refresh_connection_info'))
    self.assertFalse(hasattr(test_swap, 'refresh_connection_info'))
| |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from functools import reduce
import collections
import math
import os
import warnings
import logging
import six
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.core import CommContext
import paddle.fluid.framework as framework
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
from paddle.fluid.incubate.fleet.parameter_server.ir import vars_metatools
from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundRobin, PSDispatcher
from paddle.fluid.transpiler.details.program_utils import delete_ops
# Attribute key under which each op stores the name scope it was built in.
OP_NAME_SCOPE = "op_namescope"
# Name-scope fragment that marks gradient-clipping ops.
CLIP_OP_NAME_SCOPE = "gradient_clip"
# Reserved variable names for the PS step counter and LR decay counter.
STEP_COUNTER = "@PS_STEP_COUNTER@"
LEARNING_RATE_DECAY_COUNTER = "@LR_DECAY_COUNTER@"

# Canonical op attribute names / role values used to classify ops.
OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
LR_SCHED_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.LRSched
OPT_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.Optimize

# Sparse lookup op types; SPARSE_OP_TYPE_DICT maps each to the input slot
# that holds its embedding table.
SPARSE_OP_LIST = ["lookup_table", "lookup_table_v2"]
SPARSE_OP_TYPE_DICT = {"lookup_table": "W", "lookup_table_v2": "W"}
def _get_lr_ops(program):
    """Collect the learning-rate-schedule ops from ``program``.

    An op belongs to the LR schedule when its op-role attribute is either
    LRSched alone or the combined LRSched|Optimize role.
    """
    # Hoist the loop-invariant role values; parenthesize the bitwise-or so
    # the intended precedence is explicit.
    lr_sched_role = int(LR_SCHED_OP_ROLE_ATTR_VALUE)
    lr_sched_opt_role = lr_sched_role | int(OPT_OP_ROLE_ATTR_VALUE)
    lr_ops = []
    for op in program.global_block().ops:
        role_id = int(op.attr(RPC_OP_ROLE_ATTR_NAME))
        if role_id in (lr_sched_role, lr_sched_opt_role):
            lr_ops.append(op)
    return lr_ops
def _has_global_step(lr_ops):
    """Return True iff an ``increment`` op in ``lr_ops`` updates the
    global LR decay counter (@LR_DECAY_COUNTER@)."""
    # any() short-circuits and naturally handles the empty list; the input
    # of an op is only inspected for 'increment' ops.
    return any(op.type == 'increment' and
               op.input("X")[0] == LEARNING_RATE_DECAY_COUNTER
               for op in lr_ops)
def is_sparse_op(op):
    """Return True for a non-distributed sparse lookup op."""
    if op.type in SPARSE_OP_LIST:
        # Sparse lookup that is not running in distributed mode.
        return (op.attr('is_sparse') is True and
                op.attr('is_distributed') is False)
    if op.type == "distributed_lookup_table":
        return op.attr('is_distributed') is False
    return False
def is_distributed_sparse_op(op):
    """Return True for a sparse lookup op running in distributed mode."""
    if op.type in SPARSE_OP_LIST or op.type == "distributed_lookup_table":
        return op.attr('is_distributed') is True
    return False
def get_sparse_tablename(op):
    # The sparse table is identified by the name of the op's "W" input
    # (the embedding weight).
    return op.input("W")[0]
def get_sparse_tablenames(program, is_distributed):
    """Return the distinct sparse-table names used by ``program``.

    Args:
        program: the Program whose global block is scanned.
        is_distributed: when True, match distributed sparse ops; otherwise
            match plain (local) sparse ops.

    Returns:
        A list of unique table (embedding weight) names, in arbitrary order.
    """
    # Pick the predicate once instead of duplicating the scan loop.
    predicate = is_distributed_sparse_op if is_distributed else is_sparse_op
    tablenames = {
        get_sparse_tablename(op)
        for op in program.global_block().ops if predicate(op)
    }
    return list(tablenames)
class MergedVariable:
    """A variable formed by merging several ordered variables.

    Attributes:
        merged_var:   the merged (fused) variable.
        ordered_vars: the original variables, in merge order.
        offsets:      offset of each original variable inside merged_var.
    """

    def __init__(self, merged, ordered, offsets):
        self.merged_var = merged
        self.ordered_vars = ordered
        self.offsets = offsets
def Singleton(cls):
    """Class decorator: the first call constructs the instance, every
    later call returns that same instance (construction args ignored)."""
    _instances = {}

    def _get_instance(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]

    return _get_instance
@Singleton
class CompileTimeStrategy(object):
def __init__(self, main_program, startup_program, strategy, role_maker):
    """Pre-compute the parameter-server compile-time variable layout.

    Args:
        main_program: the user's original main Program.
        startup_program: the user's original startup Program.
        strategy: distributed strategy holding the runtime configs.
        role_maker: role maker describing this node's role and endpoints.
    """
    self.min_block_size = 81920

    self.origin_main_program = main_program
    self.origin_startup_program = startup_program
    # The "ps" programs start out identical to the origin programs and
    # may be replaced later via set_origin_ps_*_program().
    self.origin_ps_main_program = main_program
    self.origin_ps_startup_program = startup_program

    self.strategy = strategy
    self.role_maker = role_maker
    self.use_ps_gpu = False
    try:
        self.is_heter_ps_mode = role_maker._is_heter_parameter_server_mode
    except Exception:
        # Legacy role makers don't expose the heter-ps flag.  Catch
        # Exception rather than the original bare `except:`, which would
        # also swallow KeyboardInterrupt/SystemExit.
        warnings.warn(
            "Using paddle.distributed.fleet instead of paddle.fluid.incubate.fleet"
        )
        self.is_heter_ps_mode = False

    self.origin_sparse_pairs = []
    self.origin_dense_pairs = []

    self.merged_variables_pairs = []
    self.merged_dense_pairs = []
    self.merged_sparse_pairs = []
    self.merged_variable_map = {}
    self.param_name_to_grad_name = {}
    self.grad_name_to_param_name = {}

    self.param_grad_ep_mapping = collections.OrderedDict()
    self.grad_param_mapping = collections.OrderedDict()

    # Populates the pair/mapping attributes declared above.
    self._build_var_distributed()

    self.tensor_table_dict = {}

    # for heter-ps save variables
    self.origin_merged_variables_pairs = list(self.merged_variables_pairs)
    self.origin_merged_dense_pairs = list(self.merged_dense_pairs)
    self.origin_merged_sparse_pairs = list(self.merged_sparse_pairs)
def get_distributed_mode(self):
    """Return the DistributedMode (SYNC/ASYNC/GEO/...) from the strategy."""
    trainer = self.strategy.get_trainer_runtime_config()
    return trainer.mode

def is_sync_mode(self):
    trainer = self.strategy.get_trainer_runtime_config()
    return trainer.mode == DistributedMode.SYNC

def is_geo_mode(self):
    trainer = self.strategy.get_trainer_runtime_config()
    return trainer.mode == DistributedMode.GEO

def is_async_mode(self):
    trainer = self.strategy.get_trainer_runtime_config()
    return trainer.mode == DistributedMode.ASYNC

# NOTE: the accessors below first try the newer role-maker's private API
# and fall back to the legacy role-maker's public API on any failure.

def get_role_id(self):
    try:
        return self.role_maker._role_id()
    except Exception:
        return self.role_maker.role_id()

def get_trainers(self):
    try:
        return self.role_maker._worker_num()
    except Exception:
        return self.role_maker.worker_num()

def get_ps_endpoint(self):
    # Endpoint of THIS pserver (selected by this node's role id).
    try:
        return self.role_maker._get_pserver_endpoints()[self.get_role_id()]
    except Exception:
        return self.role_maker.get_pserver_endpoints()[self.get_role_id()]

def get_ps_endpoints(self):
    try:
        return self.role_maker._get_pserver_endpoints()
    except Exception:
        return self.role_maker.get_pserver_endpoints()

def get_heter_worker_endpoints(self):
    try:
        return self.role_maker._get_heter_worker_endpoints()
    except Exception:
        return self.role_maker.get_heter_worker_endpoints()

def get_next_stage_trainers(self):
    try:
        return self.role_maker._get_next_trainers()
    except Exception:
        return self.role_maker.get_next_trainers()

def get_heter_worker_endpoint(self):
    try:
        return self.role_maker._get_heter_worker_endpoint()
    except Exception:
        return self.role_maker.get_heter_worker_endpoint()

def get_trainer_endpoints(self):
    try:
        return self.role_maker._get_trainer_endpoints()
    except Exception:
        return self.role_maker.get_trainer_endpoints()

def get_trainer_endpoint(self):
    try:
        return self.role_maker._get_trainer_endpoint()
    except Exception:
        return self.role_maker.get_trainer_endpoint()

def get_previous_stage_trainers(self):
    try:
        return self.role_maker._get_previous_trainers()
    except Exception:
        return self.role_maker.get_previous_trainers()

# Plain accessors for the original / ps-transformed programs.

def get_origin_programs(self):
    return self.origin_main_program, self.origin_startup_program

def get_origin_main_program(self):
    return self.origin_main_program

def get_origin_startup_program(self):
    return self.origin_startup_program

def set_origin_ps_main_program(self, program):
    self.origin_ps_main_program = program

def set_origin_ps_startup_program(self, program):
    self.origin_ps_startup_program = program

def get_origin_ps_main_program(self):
    return self.origin_ps_main_program

def get_origin_ps_startup_program(self):
    return self.origin_ps_startup_program
def add_tensor_table(self,
                     feed_var_name,
                     fetch_var_name="",
                     startup_program=None,
                     main_program=None,
                     tensor_table_class=""):
    """Register a tensor table, keyed by its feed variable name.

    Args:
        feed_var_name: name of the variable fed into the table (key).
        fetch_var_name: name of the variable fetched from the table.
        startup_program: startup Program associated with the table.
        main_program: main Program associated with the table.
        tensor_table_class: class name of the server-side tensor table.
    """
    # One dict literal instead of five incremental key assignments.
    self.tensor_table_dict[feed_var_name] = {
        "feed_var_name": feed_var_name,
        "fetch_var_name": fetch_var_name,
        "startup_program": startup_program,
        "main_program": main_program,
        "tensor_table_class": tensor_table_class,
    }
def get_tensor_table_dict(self):
    # Mapping feed_var_name -> tensor-table record (see add_tensor_table).
    return self.tensor_table_dict
def get_sparse_varname_on_ps(self, is_distributed, endpoint=None):
    """Return the sparse table shard names served by ``endpoint``.

    Args:
        is_distributed: select distributed vs. local sparse tables.
        endpoint: pserver endpoint; defaults to this node's endpoint.
    """
    if not endpoint:
        endpoint = self.get_ps_endpoint()
    varnames = get_sparse_tablenames(self.get_origin_main_program(),
                                     is_distributed)

    ps_sparse_varnames = []
    for varname in varnames:
        # Each entry is a (shard_name, endpoint, section) triple; unpack
        # directly instead of indexing via range(len(...)).
        for table, ep, _ in self.get_var_distributed(varname, True):
            if ep == endpoint:
                ps_sparse_varnames.append(table)
    return ps_sparse_varnames
def get_optimize_varname_on_ps(self, param_name):
    """Return the optimizer-state variable names tied to ``param_name``
    (e.g. Adam moments) by scanning the optimizer ops of the origin
    main program."""
    origin_param_name, _, _ = _get_varname_parts(param_name)
    optimize_var_names = []

    for op in self.get_origin_main_program().global_block().ops:
        # check all optimizer op
        # NOTE(review): 2 is presumably OpRole.Optimize (see
        # OPT_OP_ROLE_ATTR_VALUE above) — confirm.
        if int(op.all_attrs()["op_role"]) == 2:
            # check param name
            if op.input("Param")[0] != origin_param_name:
                continue
            # check all input
            for key in op.input_names:
                # Skip the inputs that are not optimizer state.
                if key in [
                        "Param", "Grad", "LearningRate", "Beta1Tensor",
                        "Beta2Tensor"
                ]:
                    continue
                # check variable-shape-related state, e.g. Moment1
                optimize_var_names += self._get_optimizer_param_related_var_name(
                    op, op.type, key)
    return optimize_var_names
def _get_optimizer_param_related_var_name(self, op, op_type, varkey):
"""
Returns the names for optimizer inputs that need to be load
"""
related_var_names = []
if op_type == "adam":
if varkey in ["Moment1", "Moment2"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "adagrad":
if varkey == "Moment":
related_var_names.append(op.input(varkey)[0])
elif op_type in ["momentum", "lars_momentum"]:
if varkey == "Velocity":
related_var_names.append(op.input(varkey)[0])
elif op_type == "rmsprop":
if varkey in ["Moment", "MeanSquare"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "ftrl":
if varkey in ["SquaredAccumulator", "LinearAccumulator"]:
related_var_names.append(op.input(varkey)[0])
elif op_type == "sgd":
pass
else:
raise ValueError(
"Not supported optimizer for distributed training: %s" %
op_type)
return related_var_names
def build_ctx(self,
              vars,
              mapping,
              is_grad,
              is_sparse,
              is_send,
              is_distributed=False):
    """Build a CommContext describing how a (merged) variable's slices
    are named and laid out across the pserver endpoints.

    Args:
        vars: a MergedVariable or a plain variable.
        mapping: name -> list of slice variables (param or grad mapping).
        is_grad: whether ``vars`` is a gradient.
        is_sparse: whether the variable is sparse.
        is_send: whether this context is for the send direction.
        is_distributed: whether the sparse table is distributed.
    """

    def get_grad_var_ep(slices):
        # For every slice collect: the send/recv name, the owning
        # endpoint, and the section length along dim 0.
        names = []
        eps = []
        sections = []

        for slice in slices:
            if self.is_geo_mode():
                # GEO sends deltas; receives use the plain name.
                if is_send:
                    names.append("{}.delta".format(slice.name))
                else:
                    names.append(slice.name)
            elif is_grad and self.is_sync_mode() and self.get_trainers(
            ) > 1:
                # SYNC with multiple trainers: per-trainer grad name.
                names.append("{}.trainer_{}".format(slice.name,
                                                    self.get_role_id()))
            else:
                names.append(slice.name)
            sections.append(slice.shape[0])

            # Find the endpoint that owns this slice.
            for ep, pairs in self.param_grad_ep_mapping.items():
                params, grads = pairs["params"], pairs["grads"]
                for var in params + grads:
                    if slice.name == var.name:
                        eps.append(ep)
                        break
        return names, eps, sections

    if isinstance(vars, MergedVariable):
        name = vars.merged_var.name
        slices = mapping[name]
        names, eps, sections = get_grad_var_ep(slices)
        origin_varnames = [var.name for var in vars.ordered_vars]
    else:
        name = vars.name
        slices = mapping[name]
        names, eps, sections = get_grad_var_ep(slices)
        origin_varnames = [vars.name]

    trainer_id = self.get_role_id()
    aggregate = True
    ctx = CommContext(name, names, eps, sections, origin_varnames,
                      trainer_id, aggregate, is_sparse, is_distributed)
    return ctx
def get_trainer_send_context(self):
    """Build the trainer-side send contexts (legacy communicator path).

    Non-GEO modes send merged dense and sparse gradients; GEO mode sends
    sparse parameter deltas plus a step-counter context.
    """
    send_ctx = {}
    distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
                                                True)
    idx = 0

    if not self.is_geo_mode():
        for merged in self.merged_dense_pairs:
            grad = merged[1]
            ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
                                 True)
            send_ctx[ctx.var_name()] = ctx

        for merged in self.merged_sparse_pairs:
            param = merged[0]
            grad = merged[1]
            param_name = param.merged_var.name
            is_distributed = True if param_name in distibuted_varnames else False

            ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
                                 True, is_distributed)
            send_ctx[ctx.var_name()] = ctx
            idx += 1

        # Only ASYNC mode sends the step counter here.
        if self.is_async_mode():
            name, ctx = self._step_ctx(idx)
            send_ctx[name] = ctx
    else:
        # GEO: combine the param context's naming/layout with the grad
        # context's origin varnames into a single send context.
        for pairs in self.origin_sparse_pairs:
            param, grad = pairs
            param_name = param.name
            is_distributed = True if param_name in distibuted_varnames else False

            param_ctx = self.build_ctx(param, self.param_var_mapping, False,
                                       True, True, is_distributed)
            grad_ctx = self.build_ctx(grad, self.grad_var_mapping, True,
                                      True, True, is_distributed)

            ctx = CommContext(param_ctx.var_name(),
                              param_ctx.split_varnames(),
                              param_ctx.split_endpoints(),
                              param_ctx.sections(),
                              grad_ctx.origin_varnames(),
                              param_ctx.trainer_id(),
                              param_ctx.aggregate(),
                              param_ctx.is_sparse(),
                              param_ctx.is_distributed())
            send_ctx[ctx.var_name()] = ctx
            idx += 1
        name, ctx = self._step_ctx(idx)
        send_ctx[name] = ctx
    return send_ctx
def get_communicator_send_context(self):
    """Build the communicator send contexts.

    GEO mode sends merged parameters (deltas); other modes send merged
    gradients. Both add a trailing step-counter context.
    """
    send_ctx = {}
    distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
                                                True)
    idx = 0

    if self.is_geo_mode():
        for pairs in self.merged_dense_pairs:
            param = pairs[0]
            ctx = self.build_ctx(param, self.param_var_mapping, False,
                                 False, True)
            send_ctx[ctx.var_name()] = ctx

        for pairs in self.merged_sparse_pairs:
            param = pairs[0]
            param_name = param.merged_var.name
            is_distributed = True if param_name in distibuted_varnames else False

            ctx = self.build_ctx(param, self.param_var_mapping, False, True,
                                 True, is_distributed)
            send_ctx[ctx.var_name()] = ctx
            idx += 1
        name, ctx = self._step_ctx(idx)
        send_ctx[name] = ctx
    else:
        for merged in self.merged_dense_pairs:
            grad = merged[1]
            ctx = self.build_ctx(grad, self.grad_var_mapping, True, False,
                                 True)
            send_ctx[ctx.var_name()] = ctx

        for merged in self.merged_sparse_pairs:
            param, grad = merged
            param_name = param.merged_var.name
            is_distributed = True if param_name in distibuted_varnames else False

            ctx = self.build_ctx(grad, self.grad_var_mapping, True, True,
                                 True, is_distributed)
            send_ctx[ctx.var_name()] = ctx
            idx += 1

        name, ctx = self._step_ctx(idx)
        send_ctx[name] = ctx
    return send_ctx
def get_communicator_recv_context(self,
                                  recv_type=1,
                                  use_origin_program=False):
    """Build the communicator recv contexts.

    Args:
        recv_type: 1=DENSE, 2=SPARSE, 3=DISTRIBUTED, 4=ALL.
        use_origin_program: use the pre-heter-ps merged variable pairs.

    Raises:
        ValueError: if recv_type is not 1..4.  (The original ended with
            ``assert ValueError(...)``, which always passes because the
            exception instance is truthy, silently returning None.)
    """
    distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
                                                True)
    # Names of all sparse parameters; dense recv skips these.
    sparse_varnames = [param.name for param, _ in self.origin_sparse_pairs]

    dense_recv_ctx = {}
    sparse_recv_ctx = {}
    distributed_recv_ctx = {}

    variables_pairs = self.merged_variables_pairs if not use_origin_program else self.origin_merged_variables_pairs
    for merged in variables_pairs:
        params = merged[0]
        if params.merged_var.name in sparse_varnames:
            continue
        ctx = self.build_ctx(params, self.param_var_mapping, False, False,
                             False, False)
        dense_recv_ctx[ctx.var_name()] = ctx

    for param, _grad in self.origin_sparse_pairs:
        is_distributed = param.name in distibuted_varnames
        ctx = self.build_ctx(param, self.param_var_mapping, False, True,
                             False, is_distributed)
        if is_distributed:
            distributed_recv_ctx[ctx.var_name()] = ctx
        else:
            sparse_recv_ctx[ctx.var_name()] = ctx

    if recv_type == 1:
        return dense_recv_ctx
    if recv_type == 2:
        return sparse_recv_ctx
    if recv_type == 3:
        return distributed_recv_ctx
    if recv_type == 4:
        dense_recv_ctx.update(sparse_recv_ctx)
        dense_recv_ctx.update(distributed_recv_ctx)
        return dense_recv_ctx
    raise ValueError(
        "recv_type can only be 1/2/3/4, 1 : DENSE 2. SPARSE 3. DISTRIBUTED 4. ALL"
    )
def get_the_one_trainer_send_context(self, split_dense_table):
    """Build the trainer send contexts for the "the one" ps path.

    GEO mode only sends sparse gradients (plus an optional step-counter
    context); any other mode delegates to get_the_one_send_context().
    """
    if self.is_geo_mode():
        send_ctx = {}
        trainer_id = self.get_role_id()
        idx = 0

        distibuted_varnames = get_sparse_tablenames(
            self.origin_main_program, True)
        for merged in self.merged_sparse_pairs:
            param, grad = merged
            grad_name = grad.merged_var.name
            param_name = param.merged_var.name
            is_distributed = True if param_name in distibuted_varnames else False

            var = self.origin_main_program.global_block().vars[
                grad.merged_var.name]
            # Elements per row: product of all dims after dim 0.
            var_numel = reduce(lambda x, y: x * y, var.shape[1:])

            sparse_ctx = CommContext(grad_name, [grad_name],
                                     ["127.0.0.1:6071"], [var_numel],
                                     [grad_name], trainer_id, True, True,
                                     is_distributed, idx, False)
            idx += 1
            send_ctx[sparse_ctx.var_name()] = sparse_ctx
        # GEO-SGD is meaningless without sparse parameters.
        if len(send_ctx) == 0:
            raise ValueError(
                "GeoSGD require sparse parameters in your net.")

        if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
            name, ctx = self._step_ctx(idx)
            send_ctx[name] = ctx

        return send_ctx
    else:
        return self.get_the_one_send_context(split_dense_table)
def get_dense_send_context(self,
                           send_ctx,
                           idx,
                           merged_dense_pairs,
                           trainer_id,
                           split_dense_table=False):
    """Add dense-gradient send contexts to ``send_ctx`` (in place).

    When the dense table is not split, all dense grads are fused into a
    single "Dense@Grad" context; otherwise one context per gradient.

    Returns:
        The next free table index.
    """
    if len(merged_dense_pairs) < 1:
        return idx

    if not split_dense_table:
        origin_varnames = []
        var_numel = 0
        for merged in merged_dense_pairs:
            grad = merged[1]
            origin_varnames.append(grad.merged_var.name)
            var = self.origin_main_program.global_block().vars[
                grad.merged_var.name]
            # Total element count across all fused dense grads.
            var_numel += reduce(lambda x, y: x * y, var.shape)
        grad_name = "Dense@Grad"
        trainer_id = self.get_role_id()
        aggregate = True
        dense_ctx = CommContext(grad_name, [grad_name], ["127.0.0.1:6071"],
                                [var_numel], origin_varnames, trainer_id,
                                aggregate, False, False, idx, False)
        send_ctx[grad_name] = dense_ctx
        idx += 1
    else:
        # One send context (and one table index) per dense gradient.
        for merged in merged_dense_pairs:
            grad = merged[1]
            origin_varname = grad.merged_var.name
            var = self.origin_main_program.global_block().vars[
                origin_varname]
            var_numel = reduce(lambda x, y: x * y, var.shape)
            grad_name = origin_varname
            aggregate = True
            dense_ctx = CommContext(grad_name, [grad_name],
                                    ["127.0.0.1:6071"], [var_numel],
                                    [origin_varname], trainer_id, aggregate,
                                    False, False, idx, False)
            send_ctx[grad_name] = dense_ctx
            idx += 1
    return idx
def get_the_one_send_context(self,
                             split_dense_table=False,
                             use_origin_program=False,
                             ep_list=None):
    """Build all trainer send contexts: dense, sparse, and (for worker
    nodes with tensor tables) the step-counter context."""
    if ep_list is None:
        ep_list = ["127.0.0.1:6071"]
    send_ctx = {}
    trainer_id = self.get_role_id()
    idx = 0

    merged_dense_pairs = self.origin_merged_dense_pairs if use_origin_program else self.merged_dense_pairs
    merged_sparse_pairs = self.origin_merged_sparse_pairs if use_origin_program else self.merged_sparse_pairs

    # NOTE(review): get_dense_send_context returns the NEXT index, so
    # "+=" only yields the right value because idx is 0 here — confirm.
    idx += self.get_dense_send_context(send_ctx, idx, merged_dense_pairs,
                                       trainer_id, split_dense_table)

    distibuted_varnames = get_sparse_tablenames(self.origin_main_program,
                                                True)
    for merged in merged_sparse_pairs:
        param, grad = merged
        grad_name = grad.merged_var.name
        param_name = param.merged_var.name
        # One shard ("block") of the sparse table per endpoint.
        splited_varname = []

        for i in range(len(ep_list)):
            splited_varname.append("{}.block{}".format(param_name, i))

        is_distributed = True if param_name in distibuted_varnames else False

        var = self.origin_main_program.global_block().vars[
            grad.merged_var.name]

        shape = list(var.shape)
        # Distributed tables carry no fixed height (dim 0 set to 0).
        shape[0] = 0 if is_distributed else shape[0]

        sparse_ctx = CommContext(grad_name, splited_varname, ep_list, shape,
                                 [grad_name], trainer_id, True, True,
                                 is_distributed, idx, False)

        idx += 1
        send_ctx[sparse_ctx.var_name()] = sparse_ctx

    if len(self.tensor_table_dict) > 0 and self.role_maker._is_worker():
        name, ctx = self._step_ctx(idx)
        send_ctx[name] = ctx

    return send_ctx
def get_the_one_recv_context(self,
                             is_dense=True,
                             split_dense_table=False,
                             use_origin_program=False):
    """Return {table_id: [param names]} for the contexts to receive.

    With ``is_dense`` the dense (non-sparse, non-tensor-table) send
    contexts are translated; otherwise only the sparse ones are.
    """
    def params_of(ctx):
        # Translate a context's origin grad names into parameter names.
        return [
            self.grad_name_to_param_name[g] for g in ctx.origin_varnames()
        ]

    recv_id_maps = {}
    if is_dense:
        send_ctx = self.get_the_one_send_context(
            split_dense_table=split_dense_table,
            use_origin_program=use_origin_program)
        for ctx in send_ctx.values():
            if ctx.is_sparse() or ctx.is_tensor_table():
                continue
            recv_id_maps[ctx.table_id()] = params_of(ctx)
    else:
        send_ctx = self.get_the_one_send_context()
        for ctx in send_ctx.values():
            if not ctx.is_sparse():
                continue
            recv_id_maps[ctx.table_id()] = params_of(ctx)
    return recv_id_maps
def get_server_runtime_config(self):
    """Expose the server runtime configuration held by the strategy."""
    runtime_config = self.strategy.get_server_runtime_config()
    return runtime_config
def get_var_distributed(self, varname, is_param):
    """List (slice_name, endpoint, dim0) placements for ``varname``.

    ``is_param`` selects the parameter mapping; otherwise the gradient
    mapping is searched. Order follows the endpoint mapping's order.
    """
    var_distributed = []
    if is_param:
        wanted = set(var.name for var in self.param_var_mapping[varname])
        offset = 0  # running dim0 total, kept for parity with the original
        for ep, pairs in self.param_grad_ep_mapping.items():
            for candidate in pairs["params"]:
                if candidate.name not in wanted:
                    continue
                offset += candidate.shape[0]
                var_distributed.append(
                    (candidate.name, ep, candidate.shape[0]))
    else:
        wanted = set(var.name for var in self.grad_var_mapping[varname])
        for ep, pairs in self.param_grad_ep_mapping.items():
            for candidate in pairs["grads"]:
                if candidate.name in wanted:
                    var_distributed.append(
                        (candidate.name, ep, candidate.shape[0]))
    return var_distributed
def _step_ctx(self, idx):
    """Build the CommContext used to send the global step counter.

    One section of size 1 is created per parameter-server endpoint.
    """
    name = STEP_COUNTER
    endpoints = self.get_ps_endpoints()
    ctx = CommContext(name, [name] * len(endpoints), endpoints,
                      [1] * len(endpoints), [name], self.get_role_id(),
                      True, False, False, idx, True)
    return name, ctx
def _create_vars_from_blocklist(self, block_list):
    """
    Create vars for each split.
    NOTE: only grads need to be named for different trainers, use
          add_trainer_suffix to rename the grad vars.
    Args:
        block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
        add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
    Returns:
        var_mapping (collections.OrderedDict(varname->[new_varname_variable])):A dict mapping
            from original var name to each var split.
    """

    # varname->[(block_id, current_block_size)]
    block_map = collections.OrderedDict()
    var_mapping = collections.OrderedDict()

    # Each block_list entry is the string "varname:offset:size"; group the
    # (offset, size) pairs by variable name, preserving block order.
    for block_str in block_list:
        varname, offset, size = block_str.split(":")

        if varname not in block_map:
            block_map[varname] = []
        block_map[varname].append((int(offset), int(size)))

    for varname, split in six.iteritems(block_map):
        orig_var = self.merged_variable_map[varname]

        if len(split) == 1:
            # Unsplit variable: the original var doubles as its own slice.
            var_mapping[varname] = [orig_var]
            self.var_distributed.add_distributed_var(
                origin_var=orig_var,
                slice_var=orig_var,
                block_id=0,
                offset=0,
                is_slice=False,
                vtype="Param")
        else:
            var_mapping[varname] = []
            orig_shape = orig_var.shape
            # Elements per row: product of all trailing dimensions.
            orig_dim1_flatten = 1

            if len(orig_shape) >= 2:
                orig_dim1_flatten = reduce(lambda x, y: x * y,
                                           orig_shape[1:])

            for i, block in enumerate(split):
                size = block[1]
                # Convert the flat element count back into a row count.
                rows = size // orig_dim1_flatten
                splited_shape = [rows]
                if len(orig_shape) >= 2:
                    splited_shape.extend(orig_shape[1:])
                new_var_name = "%s.block%d" % (varname, i)
                slice_var = vars_metatools.VarStruct(
                    name=new_var_name,
                    shape=splited_shape,
                    dtype=orig_var.dtype,
                    type=orig_var.type,
                    lod_level=orig_var.lod_level,
                    persistable=False)
                var_mapping[varname].append(slice_var)

                # NOTE(review): offset=-1 and is_slice=False look surprising
                # for a real slice — presumably resolved downstream; confirm.
                self.var_distributed.add_distributed_var(
                    origin_var=orig_var,
                    slice_var=slice_var,
                    block_id=i,
                    offset=-1,
                    is_slice=False,
                    vtype="Param")

    return var_mapping
def _dispatcher(self):
    """Assign split param/grad vars to parameter-server endpoints.

    Dense gradients are dispatched first with a single round-robin
    cursor shared across variables; sparse gradients are dispatched in a
    second pass, resetting the cursor for every sparse variable so each
    one starts from the first endpoint. Results accumulate in
    ``self.param_grad_ep_mapping``.
    """
    ps_dispatcher = RoundRobin(self.get_ps_endpoints())
    ps_dispatcher.reset()
    grad_var_mapping_items = list(six.iteritems(self.grad_var_mapping))

    sparse_gradnames = [grad.name for _, grad in self.origin_sparse_pairs]

    # Pass 1: dense gradients only.
    for grad_varname, splited_vars in grad_var_mapping_items:
        if grad_varname in sparse_gradnames:
            continue

        send_vars = []
        for _, var in enumerate(splited_vars):
            send_vars.append(var)

        # Each grad slice maps back to the param slice it updates.
        recv_vars = []
        for _, var in enumerate(send_vars):
            recv_vars.append(self.grad_param_mapping[var])

        eps = ps_dispatcher.dispatch(recv_vars)

        for i, ep in enumerate(eps):
            self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
            self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])

    # Pass 2: sparse gradients; dispatcher restarts per variable.
    for grad_varname, splited_vars in grad_var_mapping_items:
        if grad_varname not in sparse_gradnames:
            continue

        ps_dispatcher.reset()

        send_vars = []
        for _, var in enumerate(splited_vars):
            send_vars.append(var)

        recv_vars = []
        for _, var in enumerate(send_vars):
            recv_vars.append(self.grad_param_mapping[var])

        eps = ps_dispatcher.dispatch(recv_vars)

        for i, ep in enumerate(eps):
            self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i])
            self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i])
def _slice_variable(self,
                    var_list,
                    slice_count,
                    min_block_size,
                    uniform=False):
    """
    We may need to split dense tensor to one or more blocks and put
    them equally onto parameter server. One block is a sub-tensor
    aligned by dim[0] of the tensor.

    We need to have a minimal block size so that the calculations in
    the parameter server side can gain better performance. By default
    minimum block size 8K elements (maybe 16bit or 32bit or 64bit).

    Args:
        var_list (list): List of variables.
        slice_count (int): Numel of count that variables will be sliced, which
            could be the pserver services' count.
        min_block_size (int): Minimum split block size.
        uniform (bool): If True, split dim0 as evenly as possible across
            ``slice_count`` shards instead of honoring ``min_block_size``.
    Returns:
        blocks (list[(varname, block_id, current_block_size)]): A list
            of VarBlocks. Each VarBlock specifies a shard of the var.
    """
    blocks = []
    for var in var_list:
        if not uniform:
            var_numel = reduce(lambda x, y: x * y, var.shape)

            split_count = 1

            if min_block_size == -1:
                split_count = 1
            else:
                split_count = slice_count
                max_pserver_count = int(
                    math.floor(var_numel / float(min_block_size)))
                if max_pserver_count == 0:
                    max_pserver_count = 1
                if max_pserver_count < slice_count:
                    split_count = max_pserver_count
            block_size = int(math.ceil(var_numel / float(split_count)))

            if len(var.shape) >= 2:
                # align by dim1(width) so no row is split across blocks
                dim1 = reduce(lambda x, y: x * y, var.shape[1:])
                remains = block_size % dim1
                if remains != 0:
                    block_size += dim1 - remains

            # update split_count after aligning
            split_count = int(math.ceil(var_numel / float(block_size)))
            for block_id in range(split_count):
                # last block may be smaller than block_size
                curr_block_size = min(block_size, var_numel - (
                    (block_id) * block_size))
                block = vars_metatools.VarBlock(var.name, block_id,
                                                curr_block_size)
                blocks.append(str(block))
        else:
            # BUGFIX: use floor division — "/" is true division on
            # Python 3 and produced float block sizes (and therefore
            # float VarBlock sizes that break later int() parsing).
            block_size = var.shape[0] // slice_count
            remainder = var.shape[0] % slice_count

            if block_size == 0:
                # fewer rows than shards: one single-row shard per row
                dim0s = [block_size] * remainder
            else:
                dim0s = [block_size] * slice_count
            # spread the remainder one extra row at a time
            for i in range(remainder):
                dim0s[i] = dim0s[i] + 1

            # initializer 1 keeps this valid for 1-D vars (empty tail)
            dim1 = reduce(lambda x, y: x * y, var.shape[1:], 1)

            for block_id in range(len(dim0s)):
                numel = dim0s[block_id] * dim1
                block = vars_metatools.VarBlock(var.name, block_id, numel)
                blocks.append(str(block))
    return blocks
def _get_param_grad_blocks(self, pairs, min_block_size, uniform=False):
param_list = []
grad_list = []
param_grad_set = set()
for p, g in pairs:
# todo(tangwei12) skip parameter marked not trainable
# if type(p) == Parameter and p.trainable == False:
# continue
p = p.merged_var
g = g.merged_var
if p.name not in param_grad_set:
param_list.append(p)
param_grad_set.add(p.name)
if g.name not in param_grad_set:
grad_list.append(g)
param_grad_set.add(g.name)
# when we slice var up into blocks, we will slice the var according to
# pserver services' count. A pserver may have two or more listening ports.
grad_blocks = self._slice_variable(grad_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
param_blocks = self._slice_variable(param_list,
len(self.get_ps_endpoints()),
min_block_size, uniform)
return param_blocks, grad_blocks
def _var_slice_and_distribute(self):
    """Slice merged params/grads into blocks and map them to pservers."""
    # update these mappings for further transpile:
    # 1. param_var_mapping : param var name->[split params vars]
    # 2. grad_var_mapping : grad var name->[split grads vars]
    # 3. grad_param_mapping : grad.blockx->param.blockx
    # 4. param_grad_ep_mapping : ep->{"params" : [], "grads" : [] }

    # Dense vars split by min_block_size; sparse vars split uniformly.
    dps, dgs = self._get_param_grad_blocks(self.merged_dense_pairs,
                                           self.min_block_size, False)
    sps, sgs = self._get_param_grad_blocks(self.merged_sparse_pairs,
                                           self.min_block_size, True)

    param_blocks = dps + sps
    grad_blocks = dgs + sgs

    # params and grads must pair up block-for-block
    assert (len(grad_blocks) == len(param_blocks))

    # origin_param_name->[splited_param_vars]
    self.param_var_mapping = self._create_vars_from_blocklist(param_blocks)
    self.grad_var_mapping = self._create_vars_from_blocklist(grad_blocks)

    # dict(grad_splited_var->param_splited_var)
    self.grad_param_mapping = collections.OrderedDict()
    for g, p in zip(grad_blocks, param_blocks):
        # block strings look like "varname:block_id:size"
        g_name, g_bid, _ = g.split(":")
        p_name, p_bid, _ = p.split(":")
        self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \
            self.param_var_mapping[p_name][int(p_bid)]

    # NOTE(review): print_maps is built but never used or returned —
    # presumably a debugging leftover.
    print_maps = {}
    for k, v in self.grad_param_mapping.items():
        print_maps[str(k)] = str(v)

    # create mapping of endpoint->split var to create pserver side program
    self.param_grad_ep_mapping = collections.OrderedDict()
    [
        self.param_grad_ep_mapping.update({
            ep: {
                "params": [],
                "grads": []
            }
        }) for ep in self.get_ps_endpoints()
    ]
def _build_var_distributed(self):
    """Collect param/grad pairs and derive all distribution metadata.

    Populates the origin/merged pair lists, the param<->grad name
    translation maps, and finally triggers slicing and endpoint
    dispatch via ``_var_slice_and_distribute`` and ``_dispatcher``.
    """
    self.var_distributed = vars_metatools.VarsDistributed()

    sparse_pairs, dense_pairs = self.get_param_grads()
    origin_for_sparse = []
    origin_for_dense = []
    param_name_grad_name = dict()
    grad_name_to_param_name = dict()

    # Convert framework Variables into lightweight VarStructs.
    for param, grad in sparse_pairs:
        param = vars_metatools.create_var_struct(param)
        grad = vars_metatools.create_var_struct(grad)
        origin_for_sparse.append((param, grad))

    for param, grad in dense_pairs:
        param = vars_metatools.create_var_struct(param)
        grad = vars_metatools.create_var_struct(grad)
        origin_for_dense.append((param, grad))

    # Wrap each var as a trivial MergedVariable (merged with itself only).
    for dense_pair in origin_for_dense:
        param, grad = dense_pair

        m_param = MergedVariable(param, [param], [0])
        m_grad = MergedVariable(grad, [grad], [0])
        self.merged_variables_pairs.append((m_param, m_grad))
        self.merged_dense_pairs.append((m_param, m_grad))

    for sparse_pair in origin_for_sparse:
        param, grad = sparse_pair

        m_param = MergedVariable(param, [param], [0])
        m_grad = MergedVariable(grad, [grad], [0])
        self.merged_variables_pairs.append((m_param, m_grad))
        self.merged_sparse_pairs.append((m_param, m_grad))

    # Index every merged var by name for later lookup.
    for merged in self.merged_variables_pairs:
        m_param, m_grad = merged
        self.merged_variable_map[
            m_param.merged_var.name] = m_param.merged_var
        self.merged_variable_map[m_grad.merged_var.name] = m_grad.merged_var

    param_merges = []
    param_merges.extend(origin_for_sparse)
    param_merges.extend(origin_for_dense)

    # Bidirectional name translation between params and their grads.
    for param, grad in param_merges:
        param_name_grad_name[param.name] = grad.name
        grad_name_to_param_name[grad.name] = param.name

    self.origin_sparse_pairs = origin_for_sparse
    self.origin_dense_pairs = origin_for_dense
    self.param_name_to_grad_name = param_name_grad_name
    self.grad_name_to_param_name = grad_name_to_param_name

    # NOTE(review): sparse_pair_map is filled but never read here —
    # presumably a debugging leftover.
    sparse_pair_map = collections.OrderedDict()

    for pair in self.origin_sparse_pairs + self.origin_dense_pairs:
        param, grad = pair
        sparse_pair_map[param.name] = str(param)
        sparse_pair_map[grad.name] = str(grad)

    self._var_slice_and_distribute()
    self._dispatcher()
def get_param_grads(self):
    """Return (sparse_param_grads, dense_param_grads) pairs.

    Pairs are read off the optimizer ops' OP_ROLE_VAR attributes in the
    origin main program; a parameter counts as "sparse" when it feeds a
    sparse op with ``remote_prefetch`` enabled.
    """
    origin_program = self.origin_main_program

    def _get_params_grads(sparse_varnames):
        # Walk optimizer ops and split their (param, grad) pairs by the
        # sparsity of the parameter.
        block = origin_program.global_block()

        dense_param_grads = []
        sparse_param_grads = []

        optimize_params = set()
        origin_var_dict = origin_program.global_block().vars
        role_id = int(core.op_proto_and_checker_maker.OpRole.Backward)
        for op in block.ops:
            if _is_opt_role_op(op):
                # delete clip op from opt_ops when run in Parameter Server mode
                if OP_NAME_SCOPE in op.all_attrs() \
                        and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
                    op._set_attr("op_role", role_id)
                    continue
                if op.attr(OP_ROLE_VAR_ATTR_NAME):
                    # attribute layout: [param_name, grad_name]
                    param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0]
                    grad_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[1]
                    if param_name not in optimize_params:
                        optimize_params.add(param_name)
                        param_grad = (origin_var_dict[param_name],
                                      origin_var_dict[grad_name])

                        if param_name in sparse_varnames:
                            sparse_param_grads.append(param_grad)
                        else:
                            dense_param_grads.append(param_grad)
        return sparse_param_grads, dense_param_grads

    def _get_sparse_varnames():
        # Parameters consumed by sparse ops with remote_prefetch on.
        varnames = []
        for op in origin_program.global_block().ops:
            if op.type in SPARSE_OP_TYPE_DICT.keys() \
                    and op.attr('remote_prefetch') is True:
                param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0]
                varnames.append(param_name)

        return list(set(varnames))

    sparse_varnames = _get_sparse_varnames()
    sparse_param_grads, dense_param_grads = _get_params_grads(
        sparse_varnames)

    return sparse_param_grads, dense_param_grads
def remove_var_pair_by_grad(self, var_name):
    """Remove the (param, grad) pair whose grad is named ``var_name``.

    The pair is dropped from ``merged_variables_pairs`` and from
    whichever of ``merged_dense_pairs`` / ``merged_sparse_pairs``
    contains it; if no pair matches, a message is printed.

    BUGFIX: the previous implementation deleted list elements while
    iterating the same lists with ``enumerate``, which skips the element
    following every deletion and can delete at a stale index.
    """
    for index, pair in enumerate(self.merged_variables_pairs):
        if pair[1].merged_var.name == var_name:
            del self.merged_variables_pairs[index]
            break

    for index, pair in enumerate(self.merged_dense_pairs):
        if pair[1].merged_var.name == var_name:
            del self.merged_dense_pairs[index]
            return

    for index, pair in enumerate(self.merged_sparse_pairs):
        if pair[1].merged_var.name == var_name:
            del self.merged_sparse_pairs[index]
            return

    print("Not find {} in self.merge_pairs".format(var_name))
def _is_opt_role_op(op):
    """Return True when ``op`` carries the Optimize op-role attribute."""
    # NOTE : depend on oprole to find out whether this op is for optimize
    op_maker = core.op_proto_and_checker_maker
    optimize_role = op_maker.OpRole.Optimize
    role_attr = op_maker.kOpRoleAttrName()
    if role_attr not in op.attr_names:
        return False
    return int(op.all_attrs()[role_attr]) == int(optimize_role)
def _get_optimize_ops(_program):
    """Collect optimizer-role ops from ``_program``'s global block.

    Clip ops found inside the optimizer name scope are demoted to the
    Backward role and excluded from the result.
    """
    opt_ops = []
    for op in _program.global_block().ops:
        if not _is_opt_role_op(op):
            continue
        # delete clip op from opt_ops when run in Parameter Server mode
        if OP_NAME_SCOPE in op.all_attrs() \
                and CLIP_OP_NAME_SCOPE in op.attr(OP_NAME_SCOPE):
            op._set_attr(
                "op_role",
                int(core.op_proto_and_checker_maker.OpRole.Backward))
            continue
        opt_ops.append(op)
    return opt_ops
def _add_lr_decay_table_pass(main_program, compiled_config, lr_decay_steps):
    """Register a tensor table that runs LR-scheduler decay server side.

    Only active when the origin main program carries an ``lr_sheduler``
    attribute (sic — the attribute name is misspelled upstream and must
    be matched exactly).
    """
    if hasattr(compiled_config.origin_main_program, 'lr_sheduler'):
        from paddle.optimizer.lr import LRScheduler
        assert isinstance(compiled_config.origin_main_program.lr_sheduler,
                          LRScheduler), "must be LRScheduler"
        # Build the small program pair that performs the decay step.
        ops = _get_optimize_ops(compiled_config.origin_main_program)
        lr_param_dict = _get_lr_param_dict(ops)
        lr_decay_main_program, lr_decay_startup_program, lr_name = _get_lr_sheduler_program(
            compiled_config.origin_main_program.lr_sheduler, lr_param_dict,
            lr_decay_steps)
        compiled_config.add_tensor_table(
            "@LR_DECAY_COUNTER@", lr_name, lr_decay_startup_program,
            lr_decay_main_program, "GlobalStepTable")
def _get_lr_param_dict(opt_ops):
lr_param_dict = {}
for op in opt_ops:
lr_name = op.input("LearningRate")[0]
param_name = op.input("Param")[0]
if lr_name not in lr_param_dict:
lr_param_dict[lr_name] = []
lr_param_dict[lr_name].append(param_name)
return lr_param_dict
def _get_lr_sheduler_program(lr_sheduler, lr_param_dict, lr_decay_steps):
    """Build (main_program, startup_program, lr_var_name) implementing the
    given LRScheduler as server-side decay programs.

    Raises ValueError for scheduler types outside ``schedler_decay``.
    NOTE(review): PiecewiseDecay is imported but has no branch — confirm
    whether it is intentionally unsupported.
    """
    # Supported scheduler class names (for the error message below).
    schedler_decay = [
        'NoamDecay', 'NaturalExpDecay', 'InverseTimeDecay', 'ExponentialDecay'
    ]

    from paddle.optimizer.lr import ExponentialDecay, NoamDecay, PiecewiseDecay, NaturalExpDecay, InverseTimeDecay
    from paddle.fluid.layers.learning_rate_scheduler import exponential_decay, noam_decay, piecewise_decay, natural_exp_decay, inverse_time_decay

    decay_main_program = fluid.framework.Program()
    decay_startup_program = fluid.framework.Program()
    lr_name = ""

    if isinstance(lr_sheduler, ExponentialDecay):
        with fluid.program_guard(decay_main_program, decay_startup_program):
            lr = exponential_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
            lr_name = lr.name
            logging.warn(
                "ExponentialDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
                "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
                "\t strategy.a_sync = True \n"
                "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
                % lr_decay_steps)
    elif isinstance(lr_sheduler, NoamDecay):
        with fluid.program_guard(decay_main_program, decay_startup_program):
            lr = noam_decay(lr_sheduler.d_model, lr_sheduler.warmup_steps, 1.0)
            lr_name = lr.name
            logging.warn("NoamDecay is set, warmup steps is [ %d ]" %
                         lr_sheduler.warmup_steps)
    elif isinstance(lr_sheduler, NaturalExpDecay):
        with fluid.program_guard(decay_main_program, decay_startup_program):
            lr = natural_exp_decay(1.0, lr_decay_steps, lr_sheduler.gamma, True)
            lr_name = lr.name
            logging.warn(
                "NaturalExpDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
                "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
                "\t strategy.a_sync = True \n"
                "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
                % lr_decay_steps)
    elif isinstance(lr_sheduler, InverseTimeDecay):
        with fluid.program_guard(decay_main_program, decay_startup_program):
            lr = inverse_time_decay(1.0, lr_decay_steps, lr_sheduler.gamma,
                                    True)
            lr_name = lr.name
            logging.warn(
                "InverseTimeDecay is set, staircase = True, global learning rate decay step is [ %d ], Change decay steps as follow: \n"
                "\t strategy = paddle.distributed.fleet.DistributedStrategy() \n "
                "\t strategy.a_sync = True \n"
                "\t strategy.a_sync_configs= { 'lr_decay_steps' : YOUR_DECAY_STEP } \n"
                % lr_decay_steps)
    else:
        raise ValueError(
            "Not supported current LearningRate strategy, please use follow decay strategy: {}".
            format(schedler_decay))

    return decay_main_program, decay_startup_program, lr_name
def _get_varname_parts(varname):
# returns origin, blockid, trainerid
orig_var_name = ""
trainer_part = ""
block_part = ""
trainer_idx = varname.find(".trainer_")
if trainer_idx >= 0:
trainer_part = varname[trainer_idx + 1:]
else:
trainer_idx = len(varname)
block_index = varname.find(".block")
if block_index >= 0:
block_part = varname[block_index + 1:trainer_idx]
else:
block_index = len(varname)
orig_var_name = varname[0:min(block_index, trainer_idx)]
return orig_var_name, block_part, trainer_part
def _orig_varname(varname):
    """Return only the origin part of a (possibly split) variable name."""
    origin, _block, _trainer = _get_varname_parts(varname)
    return origin
| |
import time
import rlp
from rlp.sedes import big_endian_int, Binary, binary, CountableList
from rlp.utils import decode_hex, encode_hex, str_to_bytes
from ethereum import trie
from ethereum.trie import Trie
from ethereum.securetrie import SecureTrie
from ethereum import utils
from ethereum.utils import address, int256, trie_root, hash32, to_string
from ethereum import processblock
from ethereum.transactions import Transaction
from ethereum import bloom
import sys
if sys.version_info.major == 2:
from repoze.lru import lru_cache
ETHASH_LIB = 'pyethash'
else:
from functools import lru_cache
ETHASH_LIB = 'ethash'
from ethereum.exceptions import *
from ethereum.slogging import get_logger
from ethereum.genesis_allocation import GENESIS_INITIAL_ALLOC
log = get_logger('eth.block')
log_state = get_logger('eth.msg.state')
Log = processblock.Log
# Bind a uniform ethash interface regardless of which backend library was
# selected above (pure-python 'ethash' vs the 'pyethash' extension).
if ETHASH_LIB == 'ethash':
    from ethereum import ethash, ethash_utils
    mkcache = ethash.mkcache
    serialize_cache = ethash_utils.serialize_cache
    deserialize_cache = ethash_utils.deserialize_cache
    EPOCH_LENGTH = ethash_utils.EPOCH_LENGTH
    get_cache_size = ethash_utils.get_cache_size
    get_full_size = ethash_utils.get_full_size
    hashimoto_light = ethash.hashimoto_light
elif ETHASH_LIB == 'pyethash':
    import pyethash
    mkcache = pyethash.mkcache_bytes
    # pyethash caches are plain bytes; (de)serialization is the identity.
    serialize_cache = lambda x: x
    deserialize_cache = lambda x: x
    EPOCH_LENGTH = pyethash.EPOCH_LENGTH
    get_cache_size = pyethash.get_cache_size
    get_full_size = pyethash.get_full_size
    # pyethash expects the nonce as an integer rather than big-endian bytes.
    hashimoto_light = lambda s, c, h, n: \
        pyethash.hashimoto_light(s, c, h, utils.big_endian_to_int(n))
else:
    raise Exception("invalid ethash library set")
# Genesis block difficulty
GENESIS_DIFFICULTY = 131072
# Genesis block gas limit
GENESIS_GAS_LIMIT = 3141592
# Genesis block prevhash, coinbase, nonce
GENESIS_PREVHASH = b'\x00' * 32
GENESIS_COINBASE = b'\x00' * 20
GENESIS_NONCE = utils.zpad(utils.encode_int(42), 8)
GENESIS_MIXHASH = b'\x00' * 32
# Minimum gas limit
MIN_GAS_LIMIT = 125000
# Gas limit adjustment algo:
# block.gas_limit = block.parent.gas_limit * 1023/1024 +
#                   (block.gas_used * 6 / 5) / 1024
GASLIMIT_EMA_FACTOR = 1024
GASLIMIT_ADJMAX_FACTOR = 1024
BLKLIM_FACTOR_NOM = 3
BLKLIM_FACTOR_DEN = 2
# Block reward (in wei)
BLOCK_REWARD = 1500 * utils.denoms.finney
# GHOST constants
UNCLE_DEPTH_PENALTY_FACTOR = 8
# BUGFIX: floor division keeps the reward an integer number of wei on
# Python 3, where "/" is true division and would yield a float.
NEPHEW_REWARD = BLOCK_REWARD // 32
MAX_UNCLE_DEPTH = 6  # max (block.number - uncle.number)
# Difficulty adjustment constants
DIFF_ADJUSTMENT_CUTOFF = 8
BLOCK_DIFF_FACTOR = 2048
MIN_DIFF = 131072
# PoW info
POW_EPOCH_LENGTH = 30000
# Difficulty adjustment algo
def calc_difficulty(parent, timestamp):
    """Compute a child block's difficulty from its parent and timestamp."""
    offset = parent.difficulty // BLOCK_DIFF_FACTOR
    # Difficulty rises when blocks arrive faster than the cutoff, else falls.
    if timestamp - parent.timestamp < DIFF_ADJUSTMENT_CUTOFF:
        sign = 1
    else:
        sign = -1
    # If we enter a special mode where the genesis difficulty starts off
    # below the minimal difficulty, we allow low-difficulty blocks (this
    # will never happen in the official protocol)
    floor = min(parent.difficulty, MIN_DIFF)
    return int(max(parent.difficulty + offset * sign, floor))
# Auxiliary debug value included in must_* failure output.
aux = [None]


def set_aux(auxval):
    """Set the auxiliary debug value reported on verification failure."""
    aux[0] = auxval


def must(what, f, symb, a, b):
    """Require ``f(a, b)``; raise VerificationFailed(what, a, symb, b) if not."""
    if f(a, b):
        return
    if aux[0]:
        sys.stderr.write('%r' % aux[0])
    raise VerificationFailed(what, a, symb, b)


def must_equal(what, a, b):
    return must(what, lambda x, y: x == y, "==", a, b)


def must_ge(what, a, b):
    return must(what, lambda x, y: x >= y, ">=", a, b)


def must_le(what, a, b):
    return must(what, lambda x, y: x <= y, "<=", a, b)
class Account(rlp.Serializable):

    """An Ethereum account.

    :ivar nonce: the account's nonce (the number of transactions sent by the
                 account)
    :ivar balance: the account's balance in wei
    :ivar storage: the root of the account's storage trie
    :ivar code_hash: the SHA3 hash of the code associated with the account
    :ivar db: the database in which the account's code is stored
    """

    # RLP serialization layout (order matters).
    fields = [
        ('nonce', big_endian_int),
        ('balance', big_endian_int),
        ('storage', trie_root),
        ('code_hash', hash32)
    ]

    def __init__(self, nonce, balance, storage, code_hash, db):
        # db is set before the rlp fields because the `code` property
        # reads from it using code_hash as key.
        self.db = db
        super(Account, self).__init__(nonce, balance, storage, code_hash)

    @property
    def code(self):
        """The EVM code of the account.

        This property will be read from or written to the db at each access,
        with :ivar:`code_hash` used as key.
        """
        return self.db.get(self.code_hash)

    @code.setter
    def code(self, value):
        # Code is stored content-addressed: its key is its SHA3 hash.
        self.code_hash = utils.sha3(value)
        self.db.put(self.code_hash, value)

    @classmethod
    def blank_account(cls, db):
        """Create a blank account

        The returned account will have zero nonce and balance, a blank storage
        trie and empty code.

        :param db: the db in which the account will store its code.
        """
        code_hash = utils.sha3(b'')
        db.put(code_hash, b'')
        return cls(0, 0, trie.BLANK_ROOT, code_hash, db)
class Receipt(rlp.Serializable):

    """A transaction receipt: post-state root, gas used, and logs.

    The bloom filter is always derived from ``logs``; a ``bloom``
    constructor argument, if supplied, is only used as a consistency
    check against the computed value.
    """

    fields = [
        ('state_root', trie_root),
        ('gas_used', big_endian_int),
        ('bloom', int256),
        ('logs', CountableList(processblock.Log))
    ]

    def __init__(self, state_root, gas_used, logs, bloom=None):
        # NOTE(review): rlp.Serializable.__init__ is not called here;
        # attributes are assigned directly and `bloom` remains a computed
        # property — confirm this matches the rlp version in use.
        self.state_root = state_root
        self.gas_used = gas_used
        self.logs = logs
        if bloom is not None and bloom != self.bloom:
            raise ValueError("Invalid bloom filter")

    @property
    def bloom(self):
        # Fold every log's bloomable byte strings into one bloom value.
        bloomables = [x.bloomables() for x in self.logs]
        return bloom.bloom_from_list(utils.flatten(bloomables))
class BlockHeader(rlp.Serializable):

    """A block header.

    If the block with this header exists as an instance of :class:`Block`, the
    connection can be made explicit by setting :attr:`BlockHeader.block`. Then,
    :attr:`BlockHeader.state_root`, :attr:`BlockHeader.tx_list_root` and
    :attr:`BlockHeader.receipts_root` always refer to the up-to-date value in
    the block instance.

    :ivar block: an instance of :class:`Block` or `None`
    :ivar prevhash: the 32 byte hash of the previous block
    :ivar uncles_hash: the 32 byte hash of the RLP encoded list of uncle
                       headers
    :ivar coinbase: the 20 byte coinbase address
    :ivar state_root: the root of the block's state trie
    :ivar tx_list_root: the root of the block's transaction trie
    :ivar receipts_root: the root of the block's receipts trie
    :ivar bloom: the bloom filter of the block's logs (see :class:`Receipt`)
    :ivar difficulty: the block's difficulty
    :ivar number: the number of ancestors of this block (0 for the genesis
                  block)
    :ivar gas_limit: the block's gas limit
    :ivar gas_used: the total amount of gas used by all transactions in this
                    block
    :ivar timestamp: a UNIX timestamp
    :ivar extra_data: up to 1024 bytes of additional data
    :ivar nonce: a 32 byte nonce constituting a proof-of-work, or the empty
                 string as a placeholder
    """

    fields = [
        ('prevhash', hash32),
        ('uncles_hash', hash32),
        ('coinbase', address),
        ('state_root', trie_root),
        ('tx_list_root', trie_root),
        ('receipts_root', trie_root),
        ('bloom', int256),
        ('difficulty', big_endian_int),
        ('number', big_endian_int),
        ('gas_limit', big_endian_int),
        ('gas_used', big_endian_int),
        ('timestamp', big_endian_int),
        ('extra_data', binary),
        ('mixhash', binary),
        ('nonce', Binary(8, allow_empty=True))
    ]

    def __init__(self,
                 prevhash=GENESIS_PREVHASH,
                 uncles_hash=utils.sha3rlp([]),
                 coinbase=GENESIS_COINBASE,
                 state_root=trie.BLANK_ROOT,
                 tx_list_root=trie.BLANK_ROOT,
                 receipts_root=trie.BLANK_ROOT,
                 bloom=0,
                 difficulty=GENESIS_DIFFICULTY,
                 number=0,
                 gas_limit=GENESIS_GAS_LIMIT,
                 gas_used=0,
                 timestamp=0,
                 extra_data='',
                 mixhash=GENESIS_MIXHASH,
                 nonce=''):
        # at the beginning of a method, locals() is a dict of all arguments
        fields = {k: v for k, v in locals().items() if k != 'self'}
        # accept a 40 char hex-encoded coinbase address as well as binary
        if len(fields['coinbase']) == 40:
            fields['coinbase'] = decode_hex(fields['coinbase'])
        assert len(fields['coinbase']) == 20
        self.block = None
        super(BlockHeader, self).__init__(**fields)

    @classmethod
    def from_block_rlp(cls, rlp_data):
        """Deserialize only the header out of an RLP encoded *block*.

        BUGFIX: the first parameter of this classmethod was previously
        named ``self`` although it receives the class; renamed to ``cls``.
        """
        block_data = rlp.decode_lazy(rlp_data)
        # the header is the first element of the block RLP list
        return super(BlockHeader, cls).deserialize(block_data[0])

    # state_root / tx_list_root / receipts_root delegate to the attached
    # block (if any) so that header and block can never disagree.

    @property
    def state_root(self):
        if self.block:
            return self.block.state_root
        else:
            return self._state_root

    @state_root.setter
    def state_root(self, value):
        if self.block:
            self.block.state_root = value
        else:
            self._state_root = value

    @property
    def tx_list_root(self):
        if self.block:
            return self.block.tx_list_root
        else:
            return self._tx_list_root

    @tx_list_root.setter
    def tx_list_root(self, value):
        if self.block:
            self.block.tx_list_root = value
        else:
            self._tx_list_root = value

    @property
    def receipts_root(self):
        if self.block:
            return self.block.receipts_root
        else:
            return self._receipts_root

    @receipts_root.setter
    def receipts_root(self, value):
        if self.block:
            self.block.receipts_root = value
        else:
            self._receipts_root = value

    @property
    def hash(self):
        """The binary block hash"""
        return utils.sha3(rlp.encode(self))

    def hex_hash(self):
        """The hex encoded block hash"""
        return encode_hex(self.hash)

    @property
    def mining_hash(self):
        # header hash with the PoW fields (mixhash, nonce) excluded
        return utils.sha3(rlp.encode(self,
                                     BlockHeader.exclude(['mixhash', 'nonce'])))

    @property
    def seed(self):
        """The ethash seed: sha3 iterated once per elapsed epoch."""
        seed = b'\x00' * 32
        for i in range(self.number // EPOCH_LENGTH):
            seed = utils.sha3(seed)
        return seed

    def check_pow(self, db=None, nonce=None):
        """Check if the proof-of-work of the block is valid.

        :param nonce: if given the proof of work function will be evaluated
                      with this nonce instead of the one already present in
                      the header
        :returns: `True` or `False`
        """
        nonce = nonce or self.nonce
        if db is None:
            # without an explicit db we need the attached block's db
            assert self.block is not None
            db = self.block.db
        if len(self.mixhash) != 32 or len(self.nonce) != 8:
            raise ValueError("Bad mixhash or nonce length")
        # exclude mixhash and nonce
        header_hash = self.mining_hash
        seed = self.seed
        # Grab current cache
        current_cache_size = get_cache_size(self.number)
        cache = get_cache_memoized(seed, current_cache_size)
        current_full_size = get_full_size(self.number)
        mining_output = hashimoto_light(current_full_size, cache, header_hash, nonce)
        diff = self.difficulty
        if mining_output['mix digest'] != self.mixhash:
            return False
        # valid iff the result, read as a big-endian int, clears the target
        return utils.big_endian_to_int(mining_output['result']) <= 2**256 / (diff or 1)

    def to_dict(self):
        """Serialize the header to a readable dictionary."""
        d = {}
        for field in ('prevhash', 'uncles_hash', 'extra_data', 'nonce',
                      'mixhash'):
            # NOTE(review): b'0x' + encode_hex(...) assumes encode_hex
            # returns bytes; confirm against the rlp.utils version in use.
            d[field] = b'0x' + encode_hex(getattr(self, field))
        for field in ('state_root', 'tx_list_root', 'receipts_root',
                      'coinbase'):
            d[field] = encode_hex(getattr(self, field))
        for field in ('number', 'difficulty', 'gas_limit', 'gas_used',
                      'timestamp'):
            d[field] = to_string(getattr(self, field))
        d['bloom'] = encode_hex(int256.serialize(self.bloom))
        assert len(d) == len(BlockHeader.fields)
        return d
def mirror_from(source, attributes, only_getters=True):
    """Decorator (factory) for classes that mirror some attributes from an
    instance variable.

    :param source: the name of the instance variable to mirror from
    :param attributes: list of attribute names to mirror
    :param only_getters: if true only getters but not setters are created
    """
    def decorator(cls):
        for name in attributes:
            def build_accessors(attr):
                # Bind ``attr`` per iteration to dodge the late-binding
                # closure pitfall.
                def fget(self):
                    return getattr(getattr(self, source), attr)

                def fset(self, value):
                    setattr(getattr(self, source), attr, value)

                return fget, fset

            fget, fset = build_accessors(name)
            prop = property(fget) if only_getters else property(fget, fset)
            setattr(cls, name, prop)
        return cls

    return decorator
@mirror_from('header', set(field for field, _ in BlockHeader.fields) -
set(['state_root', 'receipts_root', 'tx_list_root']),
only_getters=False)
class Block(rlp.Serializable):
"""A block.
All attributes from the block header are accessible via properties
(i.e. ``block.prevhash`` is equivalent to ``block.header.prevhash``). It
is ensured that no discrepancies between header and block occur.
:param header: the block header
:param transaction_list: a list of transactions which are replayed if the
state given by the header is not known. If the
state is known, `None` can be used instead of the
empty list.
:param uncles: a list of the headers of the uncles of this block
:param db: the database in which the block's state, transactions and
receipts are stored (required)
:param parent: optional parent which if not given may have to be loaded from
the database for replay
"""
fields = [
('header', BlockHeader),
('transaction_list', CountableList(Transaction)),
('uncles', CountableList(BlockHeader))
]
def __init__(self, header, transaction_list=[], uncles=[], db=None,
parent=None, making=False):
if db is None:
raise TypeError("No database object given")
self.db = db
self.header = header
self.uncles = uncles
self.uncles = uncles
self.suicides = []
self.logs = []
self.log_listeners = []
self.refunds = 0
self.ether_delta = 0
# Journaling cache for state tree updates
self.caches = {
'balance': {},
'nonce': {},
'code': {},
'storage': {},
'all': {}
}
self.journal = []
if self.number > 0:
self.ancestors = [self]
else:
self.ancestors = [self] + [None] * 256
# do some consistency checks on parent if given
if parent:
if self.db != parent.db:
raise ValueError("Parent lives in different database")
if self.prevhash != parent.header.hash:
raise ValueError("Block's prevhash and parent's hash do not match")
if self.number != parent.header.number + 1:
raise ValueError("Block's number is not the successor of its parent number")
if not check_gaslimit(parent, self.gas_limit):
raise ValueError("Block's gaslimit is inconsistent with its parent's gaslimit")
if self.difficulty != calc_difficulty(parent, self.timestamp):
raise ValueError("Block's difficulty is inconsistent with its parent's difficulty")
for uncle in uncles:
assert isinstance(uncle, BlockHeader)
original_values = {
'gas_used': header.gas_used,
'timestamp': header.timestamp,
'difficulty': header.difficulty,
'uncles_hash': header.uncles_hash,
'bloom': header.bloom,
}
self.transactions = Trie(db, trie.BLANK_ROOT)
self.receipts = Trie(db, trie.BLANK_ROOT)
# replay transactions if state is unknown
state_unknown = (header.prevhash != GENESIS_PREVHASH and
header.state_root != trie.BLANK_ROOT and
(len(header.state_root) != 32 or
'validated:'+self.hash not in db) and
not making)
if state_unknown:
assert transaction_list is not None
if not parent:
parent = self.get_parent()
self.state = SecureTrie(Trie(db, parent.state_root))
self.transaction_count = 0
self.gas_used = 0
# replay
for tx in transaction_list:
success, output = processblock.apply_transaction(self, tx)
self.finalize()
else:
# trust the state root in the header
self.state = SecureTrie(Trie(self.db, header._state_root))
self.transaction_count = 0
if transaction_list:
for tx in transaction_list:
self.add_transaction_to_list(tx)
if self.transactions.root_hash != header.tx_list_root:
raise ValueError("Transaction list root hash does not match")
# receipts trie populated by add_transaction_to_list is incorrect
# (it doesn't know intermediate states), so reset it
self.receipts = Trie(self.db, header.receipts_root)
# checks
set_aux(self.to_dict())
if parent:
must_equal('prev_hash', self.prevhash, parent.hash)
must_ge('gas_limit', self.gas_limit,
parent.gas_limit * (GASLIMIT_ADJMAX_FACTOR - 1) //
GASLIMIT_ADJMAX_FACTOR)
must_le('gas_limit', self.gas_limit,
parent.gas_limit * (GASLIMIT_ADJMAX_FACTOR + 1) //
GASLIMIT_ADJMAX_FACTOR)
must_equal('gas_used', original_values['gas_used'], self.gas_used)
must_equal('timestamp', self.timestamp, original_values['timestamp'])
must_equal('difficulty', self.difficulty, original_values['difficulty'])
must_equal('uncles_hash', utils.sha3(rlp.encode(uncles)), original_values['uncles_hash'])
assert header.block is None
must_equal('state_root', self.state.root_hash, header.state_root)
must_equal('tx_list_root', self.transactions.root_hash,
header.tx_list_root)
must_equal('receipts_root', self.receipts.root_hash,
header.receipts_root)
must_equal('bloom', self.bloom, original_values['bloom'])
set_aux(None)
# from now on, trie roots refer to block instead of header
header.block = self
# Basic consistency verifications
if not self.check_fields():
raise ValueError("Block is invalid")
if len(self.header.extra_data) > 1024:
raise ValueError("Extra data cannot exceed 1024 bytes")
if self.header.coinbase == '':
raise ValueError("Coinbase cannot be empty address")
if not self.state.root_hash_valid():
raise ValueError("State Merkle root of block %r not found in "
"database" % self)
if (not self.is_genesis() and self.nonce and
not self.header.check_pow(self.db)):
raise ValueError("PoW check failed")
self.db.put('validated:'+self.hash, '1')
@classmethod
def init_from_header(cls, header_rlp, db):
"""Create a block without specifying transactions or uncles.
:param header_rlp: the RLP encoded block header
:param db: the database for the block
"""
header = rlp.decode(header_rlp, BlockHeader, db=db)
return cls(header, None, [], db=db)
@classmethod
def init_from_parent(cls, parent, coinbase, nonce=b'', extra_data=b'',
timestamp=int(time.time()), uncles=[]):
"""Create a new block based on a parent block.
The block will not include any transactions and will not be finalized.
"""
header = BlockHeader(prevhash=parent.hash,
uncles_hash=utils.sha3(rlp.encode(uncles)),
coinbase=coinbase,
state_root=parent.state_root,
tx_list_root=trie.BLANK_ROOT,
receipts_root=trie.BLANK_ROOT,
bloom=0,
difficulty=calc_difficulty(parent, timestamp),
mixhash='',
number=parent.number + 1,
gas_limit=calc_gaslimit(parent),
gas_used=0,
timestamp=timestamp,
extra_data=extra_data,
nonce=nonce)
block = Block(header, [], uncles, db=parent.db,
parent=parent, making=True)
block.ancestors += parent.ancestors
return block
def check_fields(self):
"""Check that the values of all fields are well formed."""
# serialize and deserialize and check that the values didn't change
l = Block.serialize(self)
return rlp.decode(rlp.encode(l)) == l
    @property
    def hash(self):
        """The binary block hash.

        This is equivalent to ``header.hash``.
        """
        return utils.sha3(rlp.encode(self.header))
    def hex_hash(self):
        """The hex encoded block hash.

        This is equivalent to ``header.hex_hash()``.
        """
        return encode_hex(self.hash)
    @property
    def tx_list_root(self):
        # Root of the block's own transaction trie (not the header's copy).
        return self.transactions.root_hash
    @tx_list_root.setter
    def tx_list_root(self, value):
        self.transactions = Trie(self.db, value)
    @property
    def receipts_root(self):
        # Root of the block's own receipt trie.
        return self.receipts.root_hash
    @receipts_root.setter
    def receipts_root(self, value):
        self.receipts = Trie(self.db, value)
    @property
    def state_root(self):
        # Flush pending journaled account changes first so the root is current.
        self.commit_state()
        return self.state.root_hash
    @state_root.setter
    def state_root(self, value):
        # Repointing the state discards all uncommitted cached changes.
        self.state = SecureTrie(Trie(self.db, value))
        self.reset_cache()
    @property
    def uncles_hash(self):
        # Hash of the RLP-encoded list of uncle headers.
        return utils.sha3(rlp.encode(self.uncles))
    @property
    def transaction_list(self):
        # NOTE(review): duplicates get_transactions(); kept because the RLP
        # field machinery accesses this name.
        txs = []
        for i in range(self.transaction_count):
            txs.append(self.get_transaction(i))
        return txs
    def validate_uncles(self, db=None):
        """Validate the uncles of this block.

        Checks the uncles hash, the maximum uncle count (2), each uncle's
        proof of work, ancestry depth, and duplication rules.
        :returns: `True` iff every check passes
        """
        if utils.sha3(rlp.encode(self.uncles)) != self.uncles_hash:
            return False
        if len(self.uncles) > 2:
            return False
        for uncle in self.uncles:
            # NOTE(review): guard checks self.db although a `db` parameter was
            # given -- presumably both refer to the same store; verify.
            assert db is None or uncle.prevhash in self.db
            if uncle.number == self.number:
                log.error("uncle at same block height", block=self)
                return False
        # Check uncle validity
        ancestor_chain = [a for a in self.get_ancestor_list(MAX_UNCLE_DEPTH + 1) if a]
        ineligible = []
        # Uncles of this block cannot be direct ancestors and cannot also
        # be uncles included 1-6 blocks ago
        for ancestor in ancestor_chain[1:]:
            ineligible.extend(ancestor.uncles)
        ineligible.extend([b.header for b in ancestor_chain])
        # A valid uncle must descend from an ancestor 2..MAX_UNCLE_DEPTH
        # levels up (not from self or the direct parent).
        eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
        for uncle in self.uncles:
            if not uncle.check_pow(db=db):
                return False
            if uncle.prevhash not in eligible_ancestor_hashes:
                log.error("Uncle does not have a valid ancestor", block=self,
                          eligible=[x.encode('hex') for x in eligible_ancestor_hashes],
                          uncle_prevhash=uncle.prevhash.encode('hex'))
                return False
            if uncle in ineligible:
                log.error("Duplicate uncle", block=self,
                          uncle=encode_hex(utils.sha3(rlp.encode(uncle))))
                return False
            ineligible.append(uncle)
        return True
    def get_ancestor_list(self, n):
        """Return `n` ancestors of this block.

        The result will also be memoized in :attr:`ancestors`.
        :returns: a list [self, p(self), p(p(self)), ..., p^n(self)]
        """
        if self.number == 0:
            # Genesis: pad with None so lookups past the chain start are safe.
            self.ancestors = [self] + [None] * 256
        elif len(self.ancestors) <= n:
            # Extend the memoized chain by walking up from the last known one.
            first_unknown = self.ancestors[-1].get_parent()
            missing = first_unknown.get_ancestor_list(n - len(self.ancestors))
            self.ancestors += missing
        return self.ancestors[:n + 1]
    def get_ancestor(self, n):
        """Get the `n`th ancestor of this block."""
        return self.get_ancestor_list(n)[-1]
    def is_genesis(self):
        """`True` if this block is the genesis block, otherwise `False`."""
        return all((self.prevhash == GENESIS_PREVHASH,
                    self.nonce == GENESIS_NONCE))
    def _get_acct(self, address):
        """Get the account with the given address.

        Note that this method ignores cached account items.
        """
        if len(address) == 40:
            # hex-encoded address; normalize to 20 raw bytes
            address = decode_hex(address)
        assert len(address) == 20 or len(address) == 0
        rlpdata = self.state.get(address)
        if rlpdata != trie.BLANK_NODE:
            acct = rlp.decode(rlpdata, Account, db=self.db)
        else:
            # Unknown address: behave as if an empty account existed.
            acct = Account.blank_account(self.db)
        return acct
    def _get_acct_item(self, address, param):
        """Get a specific parameter of a specific account.

        :param address: the address of the account (binary or hex string)
        :param param: the requested parameter (`'nonce'`, `'balance'`,
                      `'storage'` or `'code'`)
        """
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20 or len(address) == 0
        if address in self.caches[param]:
            return self.caches[param][address]
        else:
            # Cache miss: load from the state trie and remember the value.
            account = self._get_acct(address)
            o = getattr(account, param)
            self.caches[param][address] = o
            return o
    def _set_acct_item(self, address, param, value):
        """Set a specific parameter of a specific account.

        The write only touches the journaled cache; it reaches the state trie
        when :meth:`commit_state` runs.
        :param address: the address of the account (binary or hex string)
        :param param: the requested parameter (`'nonce'`, `'balance'`,
                      `'storage'` or `'code'`)
        :param value: the new value
        """
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20
        self.set_and_journal(param, address, value)
        self.set_and_journal('all', address, True)
    def set_and_journal(self, cache, index, value):
        # Record the previous value so revert() can restore it; unchanged
        # values are not journaled.
        prev = self.caches[cache].get(index, None)
        if prev != value:
            self.journal.append([cache, index, prev, value])
            self.caches[cache][index] = value
    def _delta_item(self, address, param, value):
        """Add a value to an account item.

        If the resulting value would be negative, it is left unchanged and
        `False` is returned.
        :param address: the address of the account (binary or hex string)
        :param param: the parameter to increase or decrease (`'nonce'`,
                      `'balance'`, `'storage'` or `'code'`)
        :param value: can be positive or negative
        :returns: `True` if the operation was successful, `False` if not
        """
        new_value = self._get_acct_item(address, param) + value
        if new_value < 0:
            return False
        # Values wrap modulo 2**256 (the EVM word size).
        self._set_acct_item(address, param, new_value % 2**256)
        return True
    def mk_transaction_receipt(self, tx):
        """Create a receipt capturing the current state root, cumulative gas
        used and the logs collected so far."""
        return Receipt(self.state_root, self.gas_used, self.logs)
    def add_transaction_to_list(self, tx):
        """Add a transaction to the transaction trie.

        Note that this does not execute anything, i.e. the state is not
        updated.
        """
        # Transactions are keyed by their RLP-encoded index.
        k = rlp.encode(self.transaction_count)
        self.transactions.update(k, rlp.encode(tx))
        r = self.mk_transaction_receipt(tx)
        self.receipts.update(k, rlp.encode(r))
        self.bloom |= r.bloom  # int
        self.transaction_count += 1
    def get_transaction(self, num):
        """Get the `num`th transaction in this block.

        :raises: :exc:`IndexError` if the transaction does not exist
        """
        index = rlp.encode(num)
        tx = self.transactions.get(index)
        if tx == trie.BLANK_NODE:
            raise IndexError('Transaction does not exist')
        else:
            return rlp.decode(tx, Transaction)
    def get_transactions(self):
        """Build a list of all transactions in this block."""
        txs = []
        for i in range(self.transaction_count):
            txs.append(self.get_transaction(i))
        return txs
    def get_receipt(self, num):
        """Get the receipt of the `num`th transaction.

        :returns: an instance of :class:`Receipt`
        """
        index = rlp.encode(num)
        return rlp.decode(self.receipts.get(index), Receipt)
    def get_nonce(self, address):
        """Get the nonce of an account.

        :param address: the address of the account (binary or hex string)
        """
        return self._get_acct_item(address, 'nonce')
    def set_nonce(self, address, value):
        """Set the nonce of an account.

        The write is journaled; it is persisted by :meth:`commit_state`.
        :param address: the address of the account (binary or hex string)
        :param value: the new nonce
        """
        return self._set_acct_item(address, 'nonce', value)
    def increment_nonce(self, address):
        """Increment the nonce of an account.

        :param address: the address of the account (binary or hex string)
        :returns: `True` if successful, otherwise `False`
        """
        return self._delta_item(address, 'nonce', 1)
    def decrement_nonce(self, address):
        """Decrement the nonce of an account.

        :param address: the address of the account (binary or hex string)
        :returns: `True` if successful, otherwise `False`
        """
        return self._delta_item(address, 'nonce', -1)
    def get_balance(self, address):
        """Get the balance of an account.

        :param address: the address of the account (binary or hex string)
        """
        return self._get_acct_item(address, 'balance')
    def set_balance(self, address, value):
        """Set the balance of an account.

        The write is journaled; it is persisted by :meth:`commit_state`.
        :param address: the address of the account (binary or hex string)
        :param value: the new balance
        """
        self._set_acct_item(address, 'balance', value)
    def delta_balance(self, address, value):
        """Add `value` (possibly negative) to an account's balance.

        :param address: the address of the account (binary or hex string)
        :param value: can be positive or negative
        :returns: `True` if successful, `False` if it would go negative
        """
        return self._delta_item(address, 'balance', value)
    def transfer_value(self, from_addr, to_addr, value):
        """Transfer a value between two account balances.

        :param from_addr: the address of the sending account (binary or hex
                          string)
        :param to_addr: the address of the receiving account (binary or hex
                        string)
        :param value: the (positive) value to send
        :returns: `True` if successful, otherwise `False`
        """
        assert value >= 0
        # Only credit the recipient when the debit succeeded.
        if self.delta_balance(from_addr, -value):
            return self.delta_balance(to_addr, value)
        return False
    def get_code(self, address):
        """Get the code of an account.

        :param address: the address of the account (binary or hex string)
        """
        return self._get_acct_item(address, 'code')
    def set_code(self, address, value):
        """Set the code of an account (journaled until commit_state).

        :param address: the address of the account (binary or hex string)
        :param value: the new code
        """
        self._set_acct_item(address, 'code', value)
    def get_storage(self, address):
        """Get the trie holding an account's storage.

        :param address: the address of the account (binary or hex string)
        """
        storage_root = self._get_acct_item(address, 'storage')
        return SecureTrie(Trie(self.db, storage_root))
    def reset_storage(self, address):
        # Clear the account's storage root and zero every cached slot so the
        # journal can undo the wipe on revert.
        self._set_acct_item(address, 'storage', b'')
        CACHE_KEY = b'storage:' + address
        if CACHE_KEY in self.caches:
            for k in self.caches[CACHE_KEY]:
                self.set_and_journal(CACHE_KEY, k, 0)
    def get_storage_data(self, address, index):
        """Get a specific item in the storage of an account.

        :param address: the address of the account (binary or hex string)
        :param index: the index of the requested item in the storage
        """
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20
        CACHE_KEY = b'storage:' + address
        if CACHE_KEY in self.caches:
            if index in self.caches[CACHE_KEY]:
                return self.caches[CACHE_KEY][index]
        # Cache miss: read from the storage trie (keys are 32-byte big-endian).
        key = utils.zpad(utils.coerce_to_bytes(index), 32)
        storage = self.get_storage(address).get(key)
        if storage:
            return rlp.decode(storage, big_endian_int)
        else:
            return 0
    def set_storage_data(self, address, index, value):
        """Set a specific item in the storage of an account (journaled).

        :param address: the address of the account (binary or hex string)
        :param index: the index of the item in the storage
        :param value: the new value of the item
        """
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20
        CACHE_KEY = b'storage:' + address
        if CACHE_KEY not in self.caches:
            self.caches[CACHE_KEY] = {}
            # Mark the account as touched the first time its storage is.
            self.set_and_journal('all', address, True)
        self.set_and_journal(CACHE_KEY, index, value)
    def account_exists(self, address):
        # An account exists if it is in the state trie or has been touched in
        # this block's cache.
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20
        return len(self.state.get(address)) > 0 or address in self.caches['all']
    def add_log(self, log):
        # Append to the block's log list and notify registered listeners.
        self.logs.append(log)
        for L in self.log_listeners:
            L(log)
def commit_state(self):
"""Commit account caches"""
"""Write the acount caches on the corresponding tries."""
changes = []
if len(self.journal) == 0:
# log_state.trace('delta', changes=[])
return
addresses = sorted(list(self.caches['all'].keys()))
for addr in addresses:
acct = self._get_acct(addr)
# storage
for field in ('balance', 'nonce', 'code', 'storage'):
if addr in self.caches[field]:
v = self.caches[field][addr]
changes.append([field, addr, v])
setattr(acct, field, v)
t = SecureTrie(Trie(self.db, acct.storage))
for k, v in self.caches.get(b'storage:' + addr, {}).items():
enckey = utils.zpad(utils.coerce_to_bytes(k), 32)
val = rlp.encode(v)
changes.append(['storage', addr, k, v])
if v:
t.update(enckey, val)
else:
t.delete(enckey)
acct.storage = t.root_hash
self.state.update(addr, rlp.encode(acct))
log_state.trace('delta', changes=changes)
self.reset_cache()
    def del_account(self, address):
        """Delete an account from the state trie.

        :param address: the address of the account (binary or hex string)
        """
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20
        # Flush pending cached writes first so they cannot resurrect the
        # account afterwards.
        self.commit_state()
        self.state.delete(address)
    def account_to_dict(self, address, with_storage_root=False,
                        with_storage=True):
        """Serialize an account to a dictionary with human readable entries.

        :param address: the 20 bytes account address
        :param with_storage_root: include the account's storage root
        :param with_storage: include the whole account's storage
        """
        if len(address) == 40:
            address = decode_hex(address)
        assert len(address) == 20
        if with_storage_root:
            # if there are uncommited account changes the current storage root
            # is meaningless
            assert len(self.journal) == 0
        med_dict = {}
        account = self._get_acct(address)
        for field in ('balance', 'nonce'):
            # Prefer the cached (uncommitted) value over the trie value.
            value = self.caches[field].get(address, getattr(account, field))
            med_dict[field] = to_string(value)
        code = self.caches['code'].get(address, account.code)
        med_dict['code'] = b'0x' + encode_hex(code)
        storage_trie = SecureTrie(Trie(self.db, account.storage))
        if with_storage_root:
            med_dict['storage_root'] = encode_hex(storage_trie.get_root_hash())
        if with_storage:
            med_dict['storage'] = {}
            d = storage_trie.to_dict()
            subcache = self.caches.get(b'storage:' + address, {})
            subkeys = [utils.zpad(utils.coerce_to_bytes(kk), 32)
                       for kk in list(subcache.keys())]
            # Merge committed trie slots with cached writes; a cached zero
            # value hides (deletes) the slot.
            for k in list(d.keys()) + subkeys:
                v = d.get(k, None)
                v2 = subcache.get(utils.big_endian_to_int(k), None)
                hexkey = b'0x' + encode_hex(utils.zunpad(k))
                if v2 is not None:
                    if v2 != 0:
                        med_dict['storage'][hexkey] = \
                            b'0x' + encode_hex(utils.int_to_big_endian(v2))
                elif v is not None:
                    med_dict['storage'][hexkey] = b'0x' + encode_hex(rlp.decode(v))
        return med_dict
def reset_cache(self):
"""Reset cache and journal without commiting any changes."""
self.caches = {
'all': {},
'balance': {},
'nonce': {},
'code': {},
'storage': {},
}
self.journal = []
    def snapshot(self):
        """Capture the mutable execution state so :meth:`revert` can restore it."""
        return {
            'state': self.state.root_hash,
            'gas': self.gas_used,
            'txs': self.transactions,
            'txcount': self.transaction_count,
            'suicides': self.suicides,
            'logs': self.logs,
            'refunds': self.refunds,
            'suicides_size': len(self.suicides),
            'logs_size': len(self.logs),
            'journal': self.journal,  # pointer to reference, so is not static
            'journal_size': len(self.journal),
            'ether_delta': self.ether_delta
        }
    def revert(self, mysnapshot):
        """Revert to a previously made snapshot.

        Reverting is for example necessary when a contract runs out of gas
        during execution.
        """
        # Undo journal entries newer than the snapshot, restoring the prior
        # cache values (prev is None when the key did not exist before).
        self.journal = mysnapshot['journal']
        log_state.trace('reverting')
        while len(self.journal) > mysnapshot['journal_size']:
            cache, index, prev, post = self.journal.pop()
            log_state.trace('%r %r %r %r' % (cache, index, prev, post))
            if prev is not None:
                self.caches[cache][index] = prev
            else:
                del self.caches[cache][index]
        # Truncate suicides and logs back to their snapshot lengths.
        self.suicides = mysnapshot['suicides']
        while len(self.suicides) > mysnapshot['suicides_size']:
            self.suicides.pop()
        self.logs = mysnapshot['logs']
        while len(self.logs) > mysnapshot['logs_size']:
            self.logs.pop()
        self.refunds = mysnapshot['refunds']
        self.state.root_hash = mysnapshot['state']
        self.gas_used = mysnapshot['gas']
        self.transactions = mysnapshot['txs']
        self.transaction_count = mysnapshot['txcount']
        self.ether_delta = mysnapshot['ether_delta']
    def finalize(self):
        """Apply block and uncle mining rewards, then commit the state."""
        delta = int(BLOCK_REWARD + NEPHEW_REWARD * len(self.uncles))
        self.delta_balance(self.coinbase, delta)
        self.ether_delta += delta
        for uncle in self.uncles:
            # Uncle reward shrinks with its distance from this block.
            r = BLOCK_REWARD * \
                (UNCLE_DEPTH_PENALTY_FACTOR + uncle.number - self.number) \
                / UNCLE_DEPTH_PENALTY_FACTOR
            r = int(r)
            self.delta_balance(uncle.coinbase, r)
            self.ether_delta += r
        self.commit_state()
    def to_dict(self, with_state=False, full_transactions=False,
                with_storage_roots=False, with_uncles=False):
        """Serialize the block to a readable dictionary.

        :param with_state: include state for all accounts
        :param full_transactions: include serialized transactions (hashes
                                  otherwise)
        :param with_storage_roots: if account states are included also include
                                   their storage roots
        :param with_uncles: include uncle hashes
        """
        b = {"header": self.header.to_dict()}
        txlist = []
        for i, tx in enumerate(self.get_transactions()):
            # Pair every transaction with the data from its receipt.
            receipt_rlp = self.receipts.get(rlp.encode(i))
            receipt = rlp.decode(receipt_rlp, Receipt)
            if full_transactions:
                txjson = tx.to_dict()
            else:
                txjson = tx.hash
            txlist.append({
                "tx": txjson,
                "medstate": encode_hex(receipt.state_root),
                "gas": to_string(receipt.gas_used),
                "logs": [Log.serialize(log) for log in receipt.logs],
                "bloom": utils.int256.serialize(receipt.bloom)
            })
        b["transactions"] = txlist
        if with_state:
            state_dump = {}
            for address, v in self.state.to_dict().items():
                state_dump[encode_hex(address)] = \
                    self.account_to_dict(address, with_storage_roots)
            b['state'] = state_dump
        if with_uncles:
            # NOTE(review): relies on a `deserialize_header` classmethod that
            # is not visible here -- verify it exists.
            b['uncles'] = [self.__class__.deserialize_header(u)
                           for u in self.uncles]
        return b
    @property
    def mining_hash(self):
        """Header hash without nonce and mixhash (the proof-of-work input)."""
        return utils.sha3(rlp.encode(self.header,
                                     BlockHeader.exclude(['nonce', 'mixhash'])))
    def get_parent(self):
        """Get the parent of this block.

        :raises: :exc:`UnknownParentException` for the genesis block or when
                 the parent is not in the database
        """
        if self.number == 0:
            raise UnknownParentException('Genesis block has no parent')
        try:
            parent = get_block(self.db, self.prevhash)
        except KeyError:
            raise UnknownParentException(encode_hex(self.prevhash))
        # assert parent.state.db.db == self.state.db.db
        return parent
    def has_parent(self):
        """`True` if this block has a known parent, otherwise `False`."""
        try:
            self.get_parent()
            return True
        except UnknownParentException:
            return False
def chain_difficulty(self):
"""Get the summarized difficulty.
If the summarized difficulty is not stored in the database, it will be
calculated recursively and put in the database.
"""
if self.is_genesis():
return self.difficulty
elif b'difficulty:' + encode_hex(self.hash) in self.db:
encoded = self.db.get(b'difficulty:' + encode_hex(self.hash))
return utils.decode_int(encoded)
else:
o = self.difficulty + self.get_parent().chain_difficulty()
o += sum([uncle.difficulty for uncle in self.uncles])
self.state.db.put(b'difficulty:' + encode_hex(self.hash),
utils.encode_int(o))
return o
return rlp.decode(rlp.encode(l)) == l
def __eq__(self, other):
"""Two blocks are equal iff they have the same hash."""
return isinstance(other, (Block, CachedBlock)) and \
self.hash == other.hash
def __hash__(self):
return utils.big_endian_to_int(self.hash)
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return self.number > other.number
def __lt__(self, other):
return self.number < other.number
def __repr__(self):
return '<Block(#%d %s)>' % (self.number, encode_hex(self.hash)[:8])
def __structlog__(self):
return encode_hex(self.hash)
@lru_cache(5)
def get_cache_memoized(seedhash, size):
    """Build (or return the memoized) ethash cache for `seedhash`/`size`.

    The LRU is deliberately tiny (5 entries) because each cache is large.
    """
    return mkcache(size, seedhash)
# Gas limit adjustment algo
def calc_gaslimit(parent):
    """Compute the gas limit for the child of `parent`.

    Exponential-moving-average adjustment towards recent usage, floored at
    MIN_GAS_LIMIT and drifting back up towards GENESIS_GAS_LIMIT when below it.
    """
    decay = parent.gas_limit // GASLIMIT_EMA_FACTOR
    usage_term = ((parent.gas_used * BLKLIM_FACTOR_NOM) //
                  BLKLIM_FACTOR_DEN // GASLIMIT_EMA_FACTOR)
    limit = parent.gas_limit - decay + usage_term
    if limit < MIN_GAS_LIMIT:
        limit = MIN_GAS_LIMIT
    if limit < GENESIS_GAS_LIMIT:
        # Below the genesis limit: grow as fast as allowed, capped at genesis.
        limit = min(GENESIS_GAS_LIMIT, parent.gas_limit + decay)
    assert check_gaslimit(parent, limit)
    return limit
def check_gaslimit(parent, gas_limit):
    """`True` iff `gas_limit` is a valid child limit for `parent`.

    The child limit may deviate from the parent's by at most
    parent.gas_limit // GASLIMIT_EMA_FACTOR and must not drop below
    MIN_GAS_LIMIT.
    """
    deviation = abs(gas_limit - parent.gas_limit)
    within_bound = bool(deviation <= parent.gas_limit // GASLIMIT_EMA_FACTOR)
    return within_bound and bool(gas_limit >= MIN_GAS_LIMIT)
class CachedBlock(Block):
    """An immutable, hash-memoizing view of a :class:`Block`.

    Instances are produced by :meth:`create_cached`, which re-classes an
    existing block in place; they must not be manipulated afterwards.
    """
    _hash_cached = None
    def _set_acct_item(self, *args, **kwargs):
        # Accept any call shape: the previous zero-argument overrides made
        # misuse raise a confusing TypeError about argument counts instead of
        # the intended NotImplementedError.
        raise NotImplementedError
    def set_state_root(self, *args, **kwargs):
        raise NotImplementedError
    def revert(self, *args, **kwargs):
        raise NotImplementedError
    def commit_state(self):
        # The state of a cached block is final; committing is a no-op.
        pass
    def __hash__(self):
        return utils.big_endian_to_int(self.hash)
    @property
    def hash(self):
        """The block hash, computed once and then memoized."""
        if not self._hash_cached:
            self._hash_cached = super(CachedBlock, self).hash
        return self._hash_cached
    @classmethod
    def create_cached(cls, blk):
        """Re-class `blk` in place as a :class:`CachedBlock` and return it."""
        blk.__class__ = CachedBlock
        return blk
@lru_cache(500)
def get_block(db, blockhash):
    """Load the block with hash `blockhash` from `db` as a :class:`CachedBlock`.

    Assumption: blocks loaded from the db are not manipulated
    -> can be cached including hash
    NOTE(review): lru_cache keys on ``(db, blockhash)``, so `db` must be
    hashable -- verify the database wrapper supports this.
    """
    blk = rlp.decode(db.get(blockhash), Block, db=db)
    return CachedBlock.create_cached(blk)
#def has_block(blockhash):
# return blockhash in db.DB(utils.get_db_path())
def genesis(db, start_alloc=GENESIS_INITIAL_ALLOC, difficulty=GENESIS_DIFFICULTY):
    """Build the genesis block.

    :param db: the database to hold the genesis state
    :param start_alloc: mapping of address to a dict with any of the keys
                        'wei', 'balance', 'code', 'nonce', 'storage'
    :param difficulty: the difficulty of the genesis block
    """
    # https://ethereum.etherpad.mozilla.org/11
    header = BlockHeader(
        prevhash=GENESIS_PREVHASH,
        uncles_hash=utils.sha3(rlp.encode([])),
        coinbase=GENESIS_COINBASE,
        state_root=trie.BLANK_ROOT,
        tx_list_root=trie.BLANK_ROOT,
        receipts_root=trie.BLANK_ROOT,
        bloom=0,
        difficulty=difficulty,
        number=0,
        gas_limit=GENESIS_GAS_LIMIT,
        gas_used=0,
        timestamp=0,
        extra_data='',
        mixhash=GENESIS_MIXHASH,
        nonce=GENESIS_NONCE,
    )
    block = Block(header, [], [], db=db)
    for addr, data in start_alloc.items():
        if len(addr) == 40:
            addr = decode_hex(addr)
        assert len(addr) == 20
        # 'wei' and 'balance' are synonyms; 'balance' wins if both are given.
        if 'wei' in data:
            block.set_balance(addr, int(data['wei']))
        if 'balance' in data:
            block.set_balance(addr, int(data['balance']))
        if 'code' in data:
            block.set_code(addr, utils.scanners['bin'](data['code']))
        if 'nonce' in data:
            block.set_nonce(addr, int(data['nonce']))
        if 'storage' in data:
            # Storage keys/values are 0x-prefixed big-endian hex integers.
            for k, v in data['storage'].items():
                block.set_storage_data(addr,
                                       utils.big_endian_to_int(decode_hex(k[2:])),
                                       utils.big_endian_to_int(decode_hex(v[2:])))
    block.commit_state()
    block.state.db.commit()
    # genesis block has predefined state root (so no additional finalization
    # necessary)
    return block
def dump_genesis_block_tests_data(db):
    """Print genesis fixtures (state root, hash, RLP, alloc) as JSON."""
    import json
    g = genesis(db)
    data = dict(
        genesis_state_root=encode_hex(g.state_root),
        genesis_hash=g.hex_hash(),
        genesis_rlp_hex=encode_hex(g.serialize()),
        initial_alloc=dict()
    )
    # NOTE(review): genesis() treats alloc values as dicts, so `balance` here
    # is the raw alloc entry, not necessarily a number -- verify the intended
    # dump format.
    for addr, balance in GENESIS_INITIAL_ALLOC.items():
        data['initial_alloc'][addr] = to_string(balance)
    print(json.dumps(data, indent=1))
# ===== file-concatenation boundary: the code below belongs to an unrelated module (SeqGAN generator) =====
import tensorflow as tf
from tensorflow.python.ops import tensor_array_ops, control_flow_ops
class Generator(object):
    def __init__(self, num_emb, batch_size, emb_dim, hidden_dim,
                 sequence_length, start_token,
                 learning_rate=0.01, reward_gamma=0.95):
        """Build the SeqGAN generator graph (TensorFlow 1.x static graph).

        Constructs, on top of a single recurrent policy:
        free-running sampling (``gen_x``), supervised MLE pretraining
        (``pretrain_updates``/``pretrain_loss``) and the policy-gradient
        update (``g_updates``/``g_loss``) weighted by external ``rewards``.
        """
        self.num_emb = num_emb  # vocabulary size
        self.batch_size = batch_size
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.sequence_length = sequence_length
        self.start_token = tf.constant([start_token] * self.batch_size, dtype=tf.int32)
        self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
        self.reward_gamma = reward_gamma
        self.g_params = []
        self.d_params = []
        self.temperature = 1.0
        self.grad_clip = 5.0
        self.expected_reward = tf.Variable(tf.zeros([self.sequence_length]))
        with tf.variable_scope('generator'):
            self.g_embeddings = tf.Variable(self.init_matrix([self.num_emb, self.emb_dim]))
            self.g_params.append(self.g_embeddings)
            self.g_recurrent_unit = self.create_recurrent_unit(self.g_params)  # maps h_tm1 to h_t for generator
            self.g_output_unit = self.create_output_unit(self.g_params)  # maps h_t to o_t (output token logits)
        # placeholder definition
        self.x = tf.placeholder(tf.int32, shape=[self.batch_size, self.sequence_length])  # sequence of tokens generated by generator
        self.rewards = tf.placeholder(tf.float32, shape=[self.batch_size, self.sequence_length])  # get from rollout policy and discriminator
        # processed for batch
        with tf.device("/cpu:0"):
            self.processed_x = tf.transpose(tf.nn.embedding_lookup(self.g_embeddings, self.x), perm=[1, 0, 2])  # seq_length x batch_size x emb_dim
        # Initial states (stacked hidden state and cell state)
        self.h0 = tf.zeros([self.batch_size, self.hidden_dim])
        self.h0 = tf.stack([self.h0, self.h0])
        gen_o = tensor_array_ops.TensorArray(dtype=tf.float32, size=self.sequence_length,
                                             dynamic_size=False, infer_shape=True)
        gen_x = tensor_array_ops.TensorArray(dtype=tf.int32, size=self.sequence_length,
                                             dynamic_size=False, infer_shape=True)
        def _g_recurrence(i, x_t, h_tm1, gen_o, gen_x):
            # One free-running decode step: sample the next token from the
            # policy, record both the token and its probability.
            h_t = self.g_recurrent_unit(x_t, h_tm1)  # hidden_memory_tuple
            o_t = self.g_output_unit(h_t)  # batch x vocab , logits not prob
            log_prob = tf.log(tf.nn.softmax(o_t))
            next_token = tf.cast(tf.reshape(tf.multinomial(log_prob, 1), [self.batch_size]), tf.int32)
            x_tp1 = tf.nn.embedding_lookup(self.g_embeddings, next_token)  # batch x emb_dim
            gen_o = gen_o.write(i, tf.reduce_sum(tf.multiply(tf.one_hot(next_token, self.num_emb, 1.0, 0.0),
                                                             tf.nn.softmax(o_t)), 1))  # [batch_size] , prob
            gen_x = gen_x.write(i, next_token)  # indices, batch_size
            return i + 1, x_tp1, h_t, gen_o, gen_x
        _, _, _, self.gen_o, self.gen_x = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3, _4: i < self.sequence_length,
            body=_g_recurrence,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings, self.start_token), self.h0, gen_o, gen_x))
        self.gen_x = self.gen_x.stack()  # seq_length x batch_size
        self.gen_x = tf.transpose(self.gen_x, perm=[1, 0])  # batch_size x seq_length
        # supervised pretraining for generator
        g_predictions = tensor_array_ops.TensorArray(
            dtype=tf.float32, size=self.sequence_length,
            dynamic_size=False, infer_shape=True)
        ta_emb_x = tensor_array_ops.TensorArray(
            dtype=tf.float32, size=self.sequence_length)
        ta_emb_x = ta_emb_x.unstack(self.processed_x)
        def _pretrain_recurrence(i, x_t, h_tm1, g_predictions):
            # Teacher-forced step: feed the ground-truth embedding, record the
            # softmax distribution for the cross-entropy loss below.
            h_t = self.g_recurrent_unit(x_t, h_tm1)
            o_t = self.g_output_unit(h_t)
            g_predictions = g_predictions.write(i, tf.nn.softmax(o_t))  # batch x vocab_size
            x_tp1 = ta_emb_x.read(i)
            return i + 1, x_tp1, h_t, g_predictions
        _, _, _, self.g_predictions = control_flow_ops.while_loop(
            cond=lambda i, _1, _2, _3: i < self.sequence_length,
            body=_pretrain_recurrence,
            loop_vars=(tf.constant(0, dtype=tf.int32),
                       tf.nn.embedding_lookup(self.g_embeddings, self.start_token),
                       self.h0, g_predictions))
        self.g_predictions = tf.transpose(self.g_predictions.stack(), perm=[1, 0, 2])  # batch_size x seq_length x vocab_size
        # pretraining loss: mean token-level cross entropy under teacher forcing
        self.pretrain_loss = -tf.reduce_sum(
            tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_emb, 1.0, 0.0) * tf.log(
                tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_emb]), 1e-20, 1.0)
            )
        ) / (self.sequence_length * self.batch_size)
        # training updates
        pretrain_opt = self.g_optimizer(self.learning_rate)
        self.pretrain_grad, _ = tf.clip_by_global_norm(tf.gradients(self.pretrain_loss, self.g_params), self.grad_clip)
        self.pretrain_updates = pretrain_opt.apply_gradients(zip(self.pretrain_grad, self.g_params))
        #######################################################################################################
        #  Unsupervised Training: REINFORCE-style loss weighted by the rewards placeholder
        #######################################################################################################
        self.g_loss = -tf.reduce_sum(
            tf.reduce_sum(
                tf.one_hot(tf.to_int32(tf.reshape(self.x, [-1])), self.num_emb, 1.0, 0.0) * tf.log(
                    tf.clip_by_value(tf.reshape(self.g_predictions, [-1, self.num_emb]), 1e-20, 1.0)
                ), 1) * tf.reshape(self.rewards, [-1])
        )
        g_opt = self.g_optimizer(self.learning_rate)
        self.g_grad, _ = tf.clip_by_global_norm(tf.gradients(self.g_loss, self.g_params), self.grad_clip)
        self.g_updates = g_opt.apply_gradients(zip(self.g_grad, self.g_params))
def generate(self, sess):
    """Sample one batch of token sequences from the generator graph.

    :param sess: an active TensorFlow session.
    :return: the evaluated ``self.gen_x`` tensor (generated token ids).
    """
    return sess.run(self.gen_x)
def pretrain_step(self, sess, x):
    """Run one supervised (MLE) pre-training update on a batch.

    :param sess: an active TensorFlow session.
    :param x: batch of target token-id sequences fed to ``self.x``.
    :return: ``[update_op_result, pretrain_loss_value]``.
    """
    fetches = [self.pretrain_updates, self.pretrain_loss]
    return sess.run(fetches, feed_dict={self.x: x})
def init_matrix(self, shape):
    """Gaussian(0, 0.1) initializer used for all weight matrices."""
    return tf.random_normal(shape, stddev=0.1)
def init_vector(self, shape):
    """All-zeros initializer (used for bias vectors)."""
    return tf.zeros(shape)
def create_recurrent_unit(self, params):
    """Build the generator's LSTM cell.

    Creates the input/forget/output/candidate gate parameters, appends
    them to `params` (the list the optimizer differentiates against),
    and returns a step closure mapping (x_t, stacked (h, c)) to the
    next stacked (h, c).
    """
    # Weights and Bias for input and hidden tensor
    self.Wi = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
    self.Ui = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
    self.bi = tf.Variable(self.init_matrix([self.hidden_dim]))
    self.Wf = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
    self.Uf = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
    self.bf = tf.Variable(self.init_matrix([self.hidden_dim]))
    self.Wog = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
    self.Uog = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
    self.bog = tf.Variable(self.init_matrix([self.hidden_dim]))
    self.Wc = tf.Variable(self.init_matrix([self.emb_dim, self.hidden_dim]))
    self.Uc = tf.Variable(self.init_matrix([self.hidden_dim, self.hidden_dim]))
    self.bc = tf.Variable(self.init_matrix([self.hidden_dim]))
    # Registration order matters if callers index into `params`.
    params.extend([
        self.Wi, self.Ui, self.bi,
        self.Wf, self.Uf, self.bf,
        self.Wog, self.Uog, self.bog,
        self.Wc, self.Uc, self.bc])

    def unit(x, hidden_memory_tm1):
        # hidden_memory_tm1 stacks (h_{t-1}, c_{t-1}) along axis 0.
        previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)
        # Input Gate
        i = tf.sigmoid(
            tf.matmul(x, self.Wi) +
            tf.matmul(previous_hidden_state, self.Ui) + self.bi
        )
        # Forget Gate
        f = tf.sigmoid(
            tf.matmul(x, self.Wf) +
            tf.matmul(previous_hidden_state, self.Uf) + self.bf
        )
        # Output Gate
        o = tf.sigmoid(
            tf.matmul(x, self.Wog) +
            tf.matmul(previous_hidden_state, self.Uog) + self.bog
        )
        # New (candidate) Memory Cell
        c_ = tf.nn.tanh(
            tf.matmul(x, self.Wc) +
            tf.matmul(previous_hidden_state, self.Uc) + self.bc
        )
        # Final Memory cell: forget old state, admit new candidate
        c = f * c_prev + i * c_
        # Current Hidden state
        current_hidden_state = o * tf.nn.tanh(c)
        # Re-stack so the scan/while loop carries a single tensor.
        return tf.stack([current_hidden_state, c])

    return unit
def create_output_unit(self, params):
    """Create the hidden-state -> vocabulary-logits projection.

    Registers the projection parameters in `params` and returns a
    closure mapping a stacked (hidden, cell) pair to unnormalised
    logits (batch x num_emb).
    """
    self.Wo = tf.Variable(self.init_matrix([self.hidden_dim, self.num_emb]))
    self.bo = tf.Variable(self.init_matrix([self.num_emb]))
    params.extend([self.Wo, self.bo])

    def unit(hidden_memory_tuple):
        # Only the hidden state feeds the output layer; the cell state
        # is carried along solely for the recurrence.
        h_state, _cell = tf.unstack(hidden_memory_tuple)
        # h_state : batch x hidden_dim -> logits : batch x num_emb
        return tf.matmul(h_state, self.Wo) + self.bo

    return unit
def g_optimizer(self, *args, **kwargs):
    """Optimizer factory (Adam); override in a subclass to swap it out."""
    return tf.train.AdamOptimizer(*args, **kwargs)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2013 Edgewall Software
# Copyright (C) 2004 Dmitry Yusupov <dmitry_yus@yahoo.com>
# Copyright (C) 2004 Mark Rowe <mrowe@bluewire.net.nz>
# Copyright (C) 2010 Anatoly Techtonik <techtonik@php.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
"""
Import a Sourceforge project's tracker items into a Trac database.
Requires:
Trac 1.0 from http://trac.edgewall.org/
Python 2.5 from http://www.python.org/
1.0 clean-up by cboos **untested**, use at your own risk and send patches
The Sourceforge tracker items can be exported from the 'Backup' page
of the project admin section. Substitute XXXXX with project id:
https://sourceforge.net/export/xml_export2.php?group_id=XXXXX
$Id$
Uses Trac 0.11 DB format version 21
SourceForge XML Export format identified by the header:
<!DOCTYPE project_export SYSTEM "http://sourceforge.net/export/sf_project_export_0.2.dtd">
Works with all DB backends. Attachments are not downloaded, but inserted
as links to SF tracker.
Ticket Types, Priorities and Resolutions
----------------------------------------
Conversion kills default Trac ticket types:
- defect 1
- enhancement 2
- task 3
and priorities:
- blocker 1
- critical 2
- major 3
- minor 4
- trivial 5
and resolutions:
- fixed 1
- invalid 2
- wontfix 3
- duplicate 4
- worksforme 5
Versions and Milestones
-----------------------
Kills versions and milestones from existing Trac DB
Mapping
-------
tracker_name == ticket_type
group_name == version
category_name == component
user nobody == anonymous
Not implemented (feature:reason)
--------------------------------
attachments:made as a comment with links to attachments stored on SF
(type,id,filename,size,time,description,author,ipnr)
ticket_custom:unknown (ticket,name,value)
history:imported only for summary, priority. closed date and owner fields
severities:no field in source data
"""
#: rename users from SF to Trac
user_map = {"nobody":"anonymous"}

#: final message printed by main() after a successful import
complete_msg = """
Conversion complete.
You may want to login into Trac to verify names for ticket owners. You may
also want to rename ticket types and priorities to default.
"""
from xml.etree.ElementTree import ElementTree
import time
import sys
import trac.env
# --- utility
class DBNotEmpty(Exception):
    """Raised when the target Trac database already contains tickets."""

    def __str__(self):
        return "Will not modify database with existing tickets!"
class FlatXML(object):
    """Flat XML is XML without element attributes. Also each element
    may contain other elements or text, but not both.
    This object mirrors XML structure into own properties for convenient
    access to tree elements, i.e. flat.trackers[2].groups[2].group_name
    Uses recursion.
    """
    def __init__(self, el=None):
        """el is ElementTree element"""
        # BUG FIX: `if el:` relied on Element truthiness, which is based
        # on child count (and deprecated); test identity explicitly.
        if el is not None:
            self.merge(el)

    def merge(self, el):
        """merge supplied ElementTree element into current object"""
        for c in el:
            # BUG FIX: Element.getchildren() was removed in Python 3.9;
            # an Element supports len()/iteration directly.
            if len(c) == 0:
                if c.text is not None and len(c.text.strip()) != 0:
                    # leaf with text -> plain string attribute
                    self.__setattr__(c.tag, c.text)
                else:
                    # empty leaf -> empty list
                    self.__setattr__(c.tag, [])
            else:
                # c is a set of elements -> list of nested FlatXML
                self.__setattr__(c.tag, [FlatXML(x) for x in c])

    def __str__(self):
        buf = ""
        for sub in self.__dict__:
            val = self.__dict__[sub]
            if not isinstance(val, list):
                buf += "%s : %s\n" % (sub, val)
            else:
                for x in val:
                    buf += "\n ".join(x.__str__().split("\n"))
        return buf

    def __repr__(self):
        buf = ""
        for sub in self.__dict__:
            val = self.__dict__[sub]
            if not isinstance(val, list):
                buf += "<%s>%s</%s>\n" % (sub, val, sub)
            else:
                for x in val:
                    buf += "\n ".join(x.__repr__().split("\n"))
        return buf
# --- SF data model
class Tracker(FlatXML):
"""
<trackers>
<tracker>
<url>http://sourceforge.net/?group_id=175454&atid=873299</url>
<tracker_id>873299</tracker_id>
<name>Bugs</name>
<description>Bug Tracking System</description>
<is_public>All site users</is_public>
<allow_anon>Yes</allow_anon>
<email_updates>Send to goblinhack@gmail.com</email_updates>
<due_period>2592000</due_period>
<submit_instructions></submit_instructions>
<browse_instructions></browse_instructions>
<status_timeout>1209600</status_timeout>
<due_period_initial>0</due_period_initial>
<due_period_update>0</due_period_update>
<reopen_on_comment>1</reopen_on_comment>
<canned_responses>
</canned_responses>
<groups>
<group>
<id>632324</id>
<group_name>v1.0 (example)</group_name>
</group>
</groups>
<categories>
<category>
<id>885178</id>
<category_name>Interface (example)</category_name>
<auto_assignee>nobody</auto_assignee>
</category>
</categories>
<resolutions>
<resolution>
<id>1</id>
<name>Fixed</name>
</resolution>
<resolution>
<id>2</id>
<name>Invalid</name>
</resolution>
...
</resolutions>
<statuses>
<status>
<id>1</id>
<name>Open</name>
</status>
<status>
<id>2</id>
<name>Closed</name>
</status>
<status>
<id>3</id>
<name>Deleted</name>
</status>
<status>
<id>4</id>
<name>Pending</name>
</status>
</statuses>
...
<tracker_items>
<tracker_item>
<url>http://sourceforge.net/support/tracker.php?aid=2471428</url>
<id>2471428</id>
<status_id>2</status_id>
<category_id>100</category_id>
<group_id>100</group_id>
<resolution_id>100</resolution_id>
<submitter>sbluen</submitter>
<assignee>nobody</assignee>
<closer>goblinhack</closer>
<submit_date>1230400444</submit_date>
<close_date>1231087612</close_date>
<priority>5</priority>
<summary>glitch with edge of level</summary>
<details>The mini-laser that the future soldier carries is so powerful that it even lets me go outside the level. I stand at the top edge of the level and then shoot up, and then it gets me somewhere where I am not supposed to go.</details>
<is_private>0</is_private>
<followups>
<followup>
<id>2335316</id>
<submitter>goblinhack</submitter>
<date>1175610236</date>
<details>Logged In: YES
user_id=1577972
Originator: NO
does this happen every game or just once?
you could send me the saved file and I'll try and load it - old
versions harldy ever work with newer versions - need to add some
kind of warnings on that
tx</details>
</followup>
...
</followups>
<attachments>
<attachment>
<url>http://sourceforge.net/tracker/download.php?group_id=175454&atid=873299&file_id=289080&aid=</url>
<id>289080</id>
<filename>your_most_recent_game.gz</filename>
<description>my saved game</description>
<filesize>112968</filesize>
<filetype>application/x-gzip</filetype>
<date>1218987770</date>
<submitter>sbluen</submitter>
</attachment>
...
</attachments>
<history_entries>
<history_entry>
<id>7304242</id>
<field_name>IP</field_name>
<old_value>Artifact Created: 76.173.48.148</old_value>
<date>1230400444</date>
<updator>sbluen</updator>
</history_entry>
...
</history_entries>
</tracker_item>
...
</tracker_items>
...
</tracker>
</trackers>
"""
def __init__(self, e):
    # Tracker is FlatXML with a mandatory element: mirror the whole
    # <tracker> subtree onto instance attributes.
    self.merge(e)
class ExportedProjectData(object):
"""Project data container as Python object.
"""
def __init__(self, f):
    """Data parsing

    Parses the SF XML export from file object `f` and aggregates
    trackers, groups, priorities, resolutions, statuses and tickets.
    """
    self.trackers = [] #: tracker properties and data
    self.groups = [] #: groups []
    self.priorities = [] #: priorities used
    self.resolutions = [] #: resolutions (index, name)
    self.tickets = [] #: all tickets
    self.statuses = [] #: status (idx, name)
    self.used_resolutions = {} #: id:name
    self.used_categories = {} #: id:name
    # id '100' means no category
    self.used_categories['100'] = None
    self.users = {} #: id:name
    root = ElementTree().parse(f)
    self.users = dict([(FlatXML(u).userid, FlatXML(u).username)
                       for u in root.find('referenced_users')])
    for tracker in root.find('trackers'):
        tr = Tracker(tracker)
        self.trackers.append(tr)
        # groups-versions
        for grp in tr.groups:
            # group ids are tracker-specific even if names match
            g = (grp.id, grp.group_name)
            if g not in self.groups:
                self.groups.append(g)
        # resolutions
        for res in tr.resolutions:
            r = (res.id, res.name)
            if r not in self.resolutions:
                self.resolutions.append(r)
        # statuses
        # NOTE(review): overwritten on every tracker iteration — the last
        # tracker's status list wins; confirm statuses are identical
        # across trackers.
        self.statuses = [(s.id, s.name) for s in tr.statuses]
        # tickets
        for tck in tr.tracker_items:
            if type(tck) == str:
                # debug aid: flag malformed tracker_item entries
                print(repr(tck))
            self.tickets.append(tck)
            if int(tck.priority) not in self.priorities:
                self.priorities.append(int(tck.priority))
            res_id = getattr(tck, "resolution_id", None)
            if res_id is not None and res_id not in self.used_resolutions:
                # NOTE(review): this scan binds idx/name but neither is
                # used afterwards — looks like a leftover; the dict()
                # lookup below does the real work.
                for idx, name in self.resolutions:
                    if idx == res_id: break
                self.used_resolutions[res_id] = \
                    dict(self.resolutions)[res_id]
            # used categories
            categories = dict(self.get_categories(tr, noowner=True))
            if tck.category_id not in self.used_categories:
                self.used_categories[tck.category_id] = \
                    categories[tck.category_id]
    # sorting everything
    self.trackers.sort(key=lambda x:x.name)
    self.groups.sort()
    self.priorities.sort()
def get_categories(self, tracker=None, noid=False, noowner=False):
    """ SF categories : Trac components
    (id, name, owner) tuples for specified tracker or all trackers
    if noid or noowner flags are set, specified tuple attribute is
    stripped
    """
    selected = self.trackers if tracker is None else [tracker]
    cats = []
    for tr in selected:
        for cat in tr.categories:
            entry = (cat.id, cat.category_name, cat.auto_assignee)
            if entry not in cats:
                cats.append(entry)
    # sort by id when the id is about to be dropped, otherwise by name
    if noid:
        cats.sort()
    else:
        cats.sort(key=lambda c: c[1])
    if noowner:
        cats = [c[:2] for c in cats]
    if noid:
        cats = [c[1:] for c in cats]
    return cats
class TracDatabase(object):
def __init__(self, path):
    """Open the Trac environment at `path`; all writes go through it."""
    self.env = trac.env.Environment(path)
def hasTickets(self):
return int(self.env.db_query("SELECT count(*) FROM ticket")[0][0]) > 0
def dbCheck(self):
if self.hasTickets():
raise DBNotEmpty
def setTypeList(self, s):
"""Remove all types, set them to `s`"""
self.dbCheck()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='ticket_type'")
for i, value in enumerate(s):
db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
("ticket_type", value, i))
def setPriorityList(self, s):
"""Remove all priorities, set them to `s`"""
self.dbCheck()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='priority'")
for i, value in enumerate(s):
db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
("priority", value, i))
def setResolutionList(self, t):
"""Remove all resolutions, set them to `t` (index, name)"""
self.dbCheck()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='resolution'")
for value, name in t:
db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
("resolution", name, value))
def setComponentList(self, t):
"""Remove all components, set them to `t` (name, owner)"""
self.dbCheck()
with self.env.db_transaction as db:
db("DELETE FROM component")
for name, owner in t:
db("INSERT INTO component (name, owner) VALUES (%s, %s)",
(name, owner))
def setVersionList(self, v):
"""Remove all versions, set them to `v`"""
self.dbCheck()
with self.env.db_transaction as db:
db("DELETE FROM version")
for value in v:
# time and description are also available
db("INSERT INTO version (name) VALUES (%s)", value)
def setMilestoneList(self, m):
"""Remove all milestones, set them to `m` ("""
self.dbCheck()
with self.env.db_transaction as db:
db("DELETE FROM milestone")
for value in m:
# due, completed, description are also available
db("INSERT INTO milestone (name) VALUES (%s)", value)
def addTicket(self, type, time, changetime, component,
              priority, owner, reporter, cc,
              version, milestone, status, resolution,
              summary, description, keywords):
    """ ticket table db21.py format
    id integer PRIMARY KEY,
    type text, -- the nature of the ticket
    time integer, -- the time it was created
    changetime integer,
    component text,
    severity text,
    priority text,
    owner text, -- who is this ticket assigned to
    reporter text,
    cc text, -- email addresses to notify
    version text, --
    milestone text, --
    status text,
    resolution text,
    summary text, -- one-line summary
    description text, -- problem description (long)
    keywords text

    Returns the id of the inserted ticket row.
    """
    # SF 'Open' maps to Trac's new/assigned pair depending on whether
    # anybody owns the ticket yet.
    if status.lower() == 'open':
        if owner != '':
            status = 'assigned'
        else:
            status = 'new'
    with self.env.db_transaction as db:
        c = db.cursor()
        # '%s' % description coerces non-string values (e.g. None) to text
        c.execute("""
            INSERT INTO ticket (type, time, changetime, component,
                                priority, owner, reporter, cc, version,
                                milestone, status, resolution, summary,
                                description, keywords)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                    %s, %s)
            """, (type, time, changetime, component, priority, owner,
                  reporter, cc, version, milestone, status.lower(),
                  resolution, summary, '%s' % description, keywords))
        return db.get_last_id(c, 'ticket')
def addTicketComment(self, ticket, time, author, value):
    # A comment is a ticket_change row with field='comment';
    # '%s' % value coerces non-string values (e.g. None) to text.
    with self.env.db_transaction as db:
        db("""
            INSERT INTO ticket_change (ticket, time, author, field,
                                       oldvalue, newvalue)
            VALUES (%s, %s, %s, %s, %s, %s)
            """, (ticket, time, author, 'comment', '', '%s' % value))
def addTicketChange(self, ticket, time, author, field, oldvalue, newvalue):
    # Record one field transition in the ticket's change history.
    with self.env.db_transaction as db:
        db("""INSERT INTO ticket_change (ticket, time, author, field,
                                         oldvalue, newvalue)
              VALUES (%s, %s, %s, %s, %s, %s)
              """, (ticket, time, author, field, oldvalue, newvalue))
def importData(f, env, opt):
    """Convert a SourceForge XML export (file object `f`) into the Trac
    environment at path `env`.  `opt` is accepted from the CLI but unused.
    Interactive: prints a conversion summary and asks for confirmation
    before writing anything.
    """
    project = ExportedProjectData(f)
    trackers = project.trackers
    # NOTE(review): this local deliberately shadows the module-level
    # `trac` import; only the TracDatabase wrapper is used below.
    trac = TracDatabase(env)
    # Data conversion
    typeList = [x.name for x in trackers]
    print("%d trackers will be converted to the following ticket types:\n %s" \
          % (len(trackers), typeList))
    used_cat_names = set(project.used_categories.values())
    #: make names unique, forget about competing owners (the last one wins)
    # NOTE(review): dict.items() returning a sortable list is Python 2
    # behaviour; Python 3 would need list(...) before .sort().
    components = dict(project.get_categories(noid=True)).items()
    components.sort()
    components = [x for x in components if x[0] in used_cat_names]
    print("%d out of %d categories are used and will be converted to the"
          " following components:\n %s"
          % (len(components), len(project.get_categories()), components))
    print("..renaming component owners:")
    for i,c in enumerate(components):
        if c[1] in user_map:
            components[i] = (c[0], user_map[c[1]])
    print(" %s" % components)
    print("%d groups which will be converted to the following versions:\n"
          " %s" % (len(project.groups), project.groups))
    print("%d resolutions found :\n %s"
          % (len(project.resolutions), project.resolutions))
    resolutions = [(k,project.used_resolutions[k])
                   for k in project.used_resolutions]
    resolutions.sort(key=lambda x:int(x[0]))
    print(".. only %d used will be imported:\n %s"
          % (len(resolutions), resolutions))
    print("Priorities used so far: %s" % project.priorities)
    # raw_input: this script targets Python 2 (see module docstring)
    if not(raw_input("Continue [y/N]?").lower() == 'y'):
        sys.exit()
    # Data save
    trac.setTypeList(typeList)
    trac.setComponentList(components)
    # NOTE(review): range(min, max) excludes the highest priority; confirm
    # whether max(project.priorities) + 1 was intended.
    trac.setPriorityList(range(min(project.priorities),
                               max(project.priorities)))
    trac.setVersionList(set([x[1] for x in project.groups]))
    trac.setResolutionList(resolutions)
    trac.setMilestoneList([])
    for tracker in project.trackers:
        # id 100 means no component selected
        component_lookup = dict(project.get_categories(noowner=True) +
                                [("100", None)])
        for t in tracker.tracker_items:
            i = trac.addTicket(type=tracker.name,
                               time=int(t.submit_date),
                               changetime=int(t.submit_date),
                               component=component_lookup[t.category_id],
                               priority=t.priority,
                               owner=t.assignee \
                                   if t.assignee not in user_map \
                                   else user_map[t.assignee],
                               reporter=t.submitter \
                                   if t.submitter not in user_map \
                                   else user_map[t.submitter],
                               cc=None,
                               # 100 means no group selected
                               version=dict(project.groups +
                                            [("100", None)])[t.group_id],
                               milestone=None,
                               status=dict(project.statuses)[t.status_id],
                               resolution=dict(resolutions)[t.resolution_id] \
                                   if hasattr(t, "resolution_id") else None,
                               summary=t.summary,
                               description=t.details,
                               keywords='sf' + t.id)
            print("Imported %s as #%d" % (t.id, i))
            # Attachments are not downloaded; record them as one comment
            # of links back to the SF tracker (see module docstring).
            if len(t.attachments):
                attmsg = "SourceForge attachments:\n"
                for a in t.attachments:
                    attmsg = attmsg + " * [%s %s] (%s) - added by '%s' %s [[BR]] "\
                        % (a.url+t.id, a.filename, a.filesize+" bytes",
                           user_map.get(a.submitter, a.submitter),
                           time.strftime("%Y-%m-%d %H:%M:%S",
                                         time.localtime(int(a.date))))
                    attmsg = attmsg + "''%s ''\n" % (a.description or '')
                # empty description is as empty list
                trac.addTicketComment(ticket=i,
                                      time=time.strftime("%Y-%m-%d %H:%M:%S",
                                                         time.localtime(int(t.submit_date))),
                                      author=None, value=attmsg)
                print(" added information about %d attachments for #%d"
                      % (len(t.attachments), i))
            for msg in t.followups:
                """
                <followup>
                <id>3280792</id>
                <submitter>goblinhack</submitter>
                <date>1231087739</date>
                <details>done</details>
                </followup>
                """
                trac.addTicketComment(ticket=i,
                                      time=msg.date,
                                      author=msg.submitter,
                                      value=msg.details)
            if t.followups:
                print(" imported %d messages for #%d"
                      % (len(t.followups), i))
            # Import history
            """
            <history_entry>
            <id>4452195</id>
            <field_name>resolution_id</field_name>
            <old_value>100</old_value>
            <date>1176043865</date>
            <updator>goblinhack</updator>
            </history_entry>
            """
            revision = t.__dict__.copy()
            # iterate the history in reverse order and update ticket revision from
            # current (last) to initial
            changes = 0
            # NOTE(review): sorting FlatXML objects relies on Python 2's
            # arbitrary-object ordering; Python 3 would need a key=.
            for h in sorted(t.history_entries, reverse=True):
                """
                Processed fields (field - notes):
                IP - no target field, just skip
                summary
                priority
                close_date
                assigned_to
                Fields not processed (field: explanation):
                File Added - TODO
                resolution_id - need to update used_resolutions
                status_id
                artifact_group_id
                category_id
                group_id
                """
                # `f` is reused as the Trac field name (shadows the
                # file-object parameter, which is no longer needed here).
                f = None
                if h.field_name in ("IP",):
                    changes += 1
                    continue
                elif h.field_name in ("summary", "priority"):
                    f = h.field_name
                    oldvalue = h.old_value
                    newvalue = revision.get(h.field_name)
                elif h.field_name == 'assigned_to':
                    f = "owner"
                    newvalue = revision['assignee']
                    if h.old_value == '100': # was not assigned
                        revision['assignee'] = None
                        oldvalue = None
                    else:
                        username = project.users[h.old_value]
                        if username in user_map: username = user_map[username]
                        revision['assignee'] = oldvalue = username
                elif h.field_name == 'close_date' and revision['close_date'] != 0:
                    f = 'status'
                    oldvalue = 'assigned'
                    newvalue = 'closed'
                if f:
                    changes += 1
                    trac.addTicketChange(ticket=i,
                                         time=h.date,
                                         author=h.updator,
                                         field=f,
                                         oldvalue=oldvalue,
                                         newvalue=newvalue)
                if h.field_name != 'assigned_to':
                    # roll the working revision back one step
                    revision[h.field_name] = h.old_value
            if changes:
                print(" processed %d out of %d history items for #%d"
                      % (changes, len(t.history_entries), i))
def main():
    """Command-line entry point: parse arguments and run the import."""
    import optparse
    p = optparse.OptionParser(
        "Usage: %prog xml_export.xml /path/to/trac/environment")
    opt, args = p.parse_args()
    if len(args) != 2:
        p.error("Incorrect number of arguments")
    try:
        importData(open(args[0]), args[1], opt)
    except DBNotEmpty as e:
        # BUG FIX: "Error: " + e concatenated str with an exception
        # object (TypeError); format the exception explicitly instead.
        print("Error: %s" % e)
        sys.exit(1)
    print(complete_msg)
| |
# Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import base64
import os.path
import random
import tempfile
import sys
from requestbuilder import Arg, MutuallyExclusiveArgList
from requestbuilder.exceptions import ArgumentError
import euca2ools.bundle.manifest
import euca2ools.bundle.util
from euca2ools.commands.argtypes import (b64encoded_file_contents,
delimited_list, filesize,
manifest_block_device_mappings)
from euca2ools.commands.s3.checkbucket import CheckBucket
from euca2ools.commands.s3.createbucket import CreateBucket
from euca2ools.commands.s3.getobject import GetObject
from euca2ools.commands.s3.postobject import PostObject
from euca2ools.commands.s3.putobject import PutObject
from euca2ools.exceptions import AWSError
EC2_BUNDLE_SIZE_LIMIT = 10 * 2 ** 30 # 10 GiB
class BundleCreatingMixin(object):
ARGS = [Arg('-i', '--image', metavar='FILE', required=True,
help='file containing the image to bundle (required)'),
Arg('-p', '--prefix', help='''the file name prefix to give the
bundle's files (required when bundling stdin; otherwise
defaults to the image's file name)'''),
Arg('-d', '--destination', metavar='DIR', help='''location to place
the bundle's files (default: dir named by TMPDIR, TEMP, or TMP
environment variables, or otherwise /var/tmp)'''),
Arg('-r', '--arch', required=True,
choices=('i386', 'x86_64', 'armhf', 'ppc', 'ppc64'),
help="the image's architecture (required)"),
# User- and cloud-specific stuff
Arg('-k', '--privatekey', metavar='FILE', help='''file containing
your private key to sign the bundle's manifest with. This
private key will also be required to unbundle the image in the
future.'''),
Arg('-c', '--cert', metavar='FILE',
help='file containing your X.509 certificate'),
Arg('--ec2cert', metavar='FILE', help='''file containing the
cloud's X.509 certificate'''),
Arg('-u', '--user', metavar='ACCOUNT', help='your account ID'),
Arg('--kernel', metavar='IMAGE', help='''ID of the kernel image to
associate with this machine image'''),
Arg('--ramdisk', metavar='IMAGE', help='''ID of the ramdisk image
to associate with this machine image'''),
# Obscurities
Arg('-B', '--block-device-mappings',
metavar='VIRTUAL1=DEVICE1,VIRTUAL2=DEVICE2,...',
type=manifest_block_device_mappings,
help='''block device mapping scheme with which to launch
instances of this machine image'''),
Arg('--productcodes', metavar='CODE1,CODE2,...',
type=delimited_list(','), default=[],
help='comma-separated list of product codes for the image'),
Arg('--image-type', choices=('machine', 'kernel', 'ramdisk'),
default='machine', help=argparse.SUPPRESS),
# Stuff needed to fill out TarInfo when input comes from stdin.
#
# We technically could ask for a lot more, but most of it is
# unnecessary since owners/modes/etc will be ignored at unbundling
# time anyway.
#
# When bundling stdin we interpret --prefix as the image's file
# name.
Arg('--image-size', type=filesize, help='''the image's size
(required when bundling stdin)'''),
# Overrides for debugging and other entertaining uses
Arg('--part-size', type=filesize, default=10485760, # 10M
help=argparse.SUPPRESS),
Arg('--enc-key', type=(lambda s: int(s, 16)),
help=argparse.SUPPRESS), # a hex string
Arg('--enc-iv', type=(lambda s: int(s, 16)),
help=argparse.SUPPRESS), # a hex string
# Noop, for compatibility
Arg('--batch', action='store_true', help=argparse.SUPPRESS)]
### CONFIG METHODS ###
def configure_bundle_creds(self):
    """Resolve signing credentials from CLI args, environment and config.

    Precedence for each credential: explicit argument, then environment
    variable(s), then the config option.  Raises ArgumentError if any
    required credential is still missing afterwards.
    """
    # User's X.509 certificate (user-level in config)
    if not self.args.get('cert'):
        config_cert = self.config.get_user_option('certificate')
        if 'EC2_CERT' in os.environ:
            self.args['cert'] = os.getenv('EC2_CERT')
        elif 'EUCA_CERT' in os.environ:  # used by the NC
            self.args['cert'] = os.getenv('EUCA_CERT')
        elif config_cert:
            self.args['cert'] = config_cert
    if self.args.get('cert'):
        self.args['cert'] = os.path.expanduser(os.path.expandvars(
            self.args['cert']))
        _assert_is_file(self.args['cert'], 'user certificate')

    # User's private key (user-level in config)
    if not self.args.get('privatekey'):
        config_privatekey = self.config.get_user_option('private-key')
        if 'EC2_PRIVATE_KEY' in os.environ:
            self.args['privatekey'] = os.getenv('EC2_PRIVATE_KEY')
        # BUG FIX: this was a second `if`, so EUCA_PRIVATE_KEY silently
        # overrode EC2_PRIVATE_KEY; use elif to match the precedence the
        # certificate lookup above establishes (EC2_* first).
        elif 'EUCA_PRIVATE_KEY' in os.environ:  # used by the NC
            self.args['privatekey'] = os.getenv('EUCA_PRIVATE_KEY')
        elif config_privatekey:
            self.args['privatekey'] = config_privatekey
    if self.args.get('privatekey'):
        self.args['privatekey'] = os.path.expanduser(os.path.expandvars(
            self.args['privatekey']))
        _assert_is_file(self.args['privatekey'], 'private key')

    # Cloud's X.509 cert (region-level in config)
    if not self.args.get('ec2cert'):
        # renamed from config_privatekey: this option is a certificate
        config_ec2cert = self.config.get_region_option('certificate')
        if 'EUCALYPTUS_CERT' in os.environ:
            # This has no EC2 equivalent since they just bundle their cert.
            self.args['ec2cert'] = os.getenv('EUCALYPTUS_CERT')
        elif config_ec2cert:
            self.args['ec2cert'] = config_ec2cert
    if self.args.get('ec2cert'):
        self.args['ec2cert'] = os.path.expanduser(os.path.expandvars(
            self.args['ec2cert']))
        _assert_is_file(self.args['ec2cert'], 'cloud certificate')

    # User's account ID (user-level)
    if not self.args.get('user'):
        config_account_id = self.config.get_user_option('account-id')
        if 'EC2_USER_ID' in os.environ:
            self.args['user'] = os.getenv('EC2_USER_ID')
        elif config_account_id:
            self.args['user'] = config_account_id

    # Now validate everything
    if not self.args.get('cert'):
        raise ArgumentError(
            'missing certificate; please supply one with -c')
    self.log.debug('certificate: %s', self.args['cert'])
    if not self.args.get('privatekey'):
        raise ArgumentError(
            'missing private key; please supply one with -k')
    self.log.debug('private key: %s', self.args['privatekey'])
    if not self.args.get('ec2cert'):
        raise ArgumentError(
            'missing cloud certificate; please supply one with --ec2cert')
    self.log.debug('cloud certificate: %s', self.args['ec2cert'])
    if not self.args.get('user'):
        raise ArgumentError(
            'missing account ID; please supply one with --user')
    self.log.debug('account ID: %s', self.args['user'])
def configure_bundle_output(self):
    """Validate -d/--destination and normalise self.args['image'] to an
    open file object, filling in prefix/image_size along the way.
    """
    if (self.args.get('destination') and
            os.path.exists(self.args['destination']) and not
            os.path.isdir(self.args['destination'])):
        raise ArgumentError("argument -d/--destination: '{0}' is not a "
                            "directory".format(self.args['destination']))
    if self.args['image'] == '-':
        # Bundling stdin: it cannot be stat'ed, so the caller must name
        # and size the image explicitly.
        self.args['image'] = sys.stdin
        if not self.args.get('prefix'):
            raise ArgumentError(
                'argument --prefix is required when bundling stdin')
        if not self.args.get('image_size'):
            raise ArgumentError(
                'argument --image-size is required when bundling stdin')
    elif isinstance(self.args['image'], basestring):  # Python 2 only
        # A file name: derive prefix and size from the file itself.
        if not self.args.get('prefix'):
            self.args['prefix'] = os.path.basename(self.args['image'])
        if not self.args.get('image_size'):
            self.args['image_size'] = euca2ools.util.get_filesize(
                self.args['image'])
        self.args['image'] = open(self.args['image'])
    else:
        # Assume it is already a file object
        if not self.args.get('prefix'):
            raise ArgumentError('argument --prefix is required when '
                                'bundling a file object')
        if not self.args.get('image_size'):
            raise ArgumentError('argument --image-size is required when '
                                'bundling a file object')
    if self.args['image_size'] > EC2_BUNDLE_SIZE_LIMIT:
        # Warn rather than fail: the bundle may still work on Eucalyptus.
        self.log.warn(
            'image is incompatible with EC2 due to its size (%i > %i)',
            self.args['image_size'], EC2_BUNDLE_SIZE_LIMIT)
def configure_bundle_properties(self):
    """Infer and validate image_type against --kernel/--ramdisk/-B flags.

    The literal value 'true' for --kernel/--ramdisk marks the image
    itself as a kernel/ramdisk instead of referencing another image.
    """
    if self.args.get('kernel') == 'true':
        self.args['image_type'] = 'kernel'
    if self.args.get('ramdisk') == 'true':
        self.args['image_type'] = 'ramdisk'
    if self.args['image_type'] == 'kernel':
        # A kernel image cannot reference another kernel/ramdisk or
        # carry block device mappings.
        if self.args.get('kernel') and self.args['kernel'] != 'true':
            raise ArgumentError("argument --kernel: not compatible with "
                                "image type 'kernel'")
        if self.args.get('ramdisk'):
            raise ArgumentError("argument --ramdisk: not compatible with "
                                "image type 'kernel'")
        if self.args.get('block_device_mappings'):
            raise ArgumentError("argument -B/--block-device-mappings: not "
                                "compatible with image type 'kernel'")
    if self.args['image_type'] == 'ramdisk':
        if self.args.get('kernel'):
            raise ArgumentError("argument --kernel: not compatible with "
                                "image type 'ramdisk'")
        if self.args.get('ramdisk') and self.args['ramdisk'] != 'true':
            raise ArgumentError("argument --ramdisk: not compatible with "
                                "image type 'ramdisk'")
        if self.args.get('block_device_mappings'):
            raise ArgumentError("argument -B/--block-device-mappings: not "
                                "compatible with image type 'ramdisk'")
def generate_encryption_keys(self):
    """Ensure enc_key and enc_iv are set, generating random values
    where the caller supplied none, and normalise both to 32-char
    zero-padded lowercase hex strings.
    """
    rng = random.SystemRandom()

    key_bits = self.args.get('enc_key')
    if key_bits:
        self.log.info('using preexisting encryption key')
    else:
        key_bits = rng.getrandbits(128)

    iv_bits = self.args.get('enc_iv')
    if iv_bits:
        self.log.info('using preexisting encryption IV')
    else:
        iv_bits = rng.getrandbits(128)

    self.args['enc_key'] = '{0:0>32x}'.format(key_bits)
    self.args['enc_iv'] = '{0:0>32x}'.format(iv_bits)
### MANIFEST GENERATION METHODS ###
def build_manifest(self, digest, partinfo):
    """Assemble a BundleManifest from CLI args plus bundling results.

    digest -- digest of the whole (unencrypted) image
    partinfo -- sequence of per-part records, each with a .size
    """
    manifest = euca2ools.bundle.manifest.BundleManifest(
        loglevel=self.log.level)
    manifest.image_arch = self.args['arch']
    manifest.kernel_id = self.args.get('kernel')
    manifest.ramdisk_id = self.args.get('ramdisk')
    if self.args.get('block_device_mappings'):
        manifest.block_device_mappings.update(
            self.args['block_device_mappings'])
    if self.args.get('productcodes'):
        manifest.product_codes.extend(self.args['productcodes'])
    manifest.image_name = self.args['prefix']
    manifest.account_id = self.args['user']
    manifest.image_type = self.args['image_type']
    manifest.image_digest = digest
    manifest.image_digest_algorithm = 'SHA1'  # shouldn't be hardcoded here
    manifest.image_size = self.args['image_size']
    manifest.bundled_image_size = sum(part.size for part in partinfo)
    manifest.enc_key = self.args['enc_key']
    manifest.enc_iv = self.args['enc_iv']
    manifest.enc_algorithm = 'AES-128-CBC'  # shouldn't be hardcoded here
    manifest.image_parts = partinfo
    return manifest
def dump_manifest_to_file(self, manifest, filename, pretty_print=False):
    """Serialize and sign *manifest*, writing the XML to *filename*."""
    with open(filename, 'w') as manifest_file:
        serialized = self.dump_manifest_to_str(manifest,
                                               pretty_print=pretty_print)
        manifest_file.write(serialized)
def dump_manifest_to_str(self, manifest, pretty_print=False):
    """Return *manifest* serialized and signed as an XML string."""
    privkey = self.args['privatekey']
    cert = self.args['cert']
    ec2cert = self.args['ec2cert']
    return manifest.dump_to_str(privkey, cert, ec2cert,
                                pretty_print=pretty_print)
class BundleUploadingMixin(object):
    """Mixin for request classes that upload image bundles to a bucket.

    Supplies the -b/--bucket, --acl, upload-policy, --location, and
    --retry command line arguments, plus helpers for authentication,
    bucket creation, and uploading bundle files/parts.
    """

    ARGS = [Arg('-b', '--bucket', metavar='BUCKET[/PREFIX]', required=True,
                help='bucket to upload the bundle to (required)'),
            Arg('--acl', default='aws-exec-read',
                choices=('public-read', 'aws-exec-read', 'ec2-bundle-read'),
                help='''canned ACL policy to apply to the bundle (default:
                aws-exec-read)'''),
            MutuallyExclusiveArgList(
                Arg('--upload-policy', dest='upload_policy', metavar='POLICY',
                    type=base64.b64encode,
                    help='upload policy to use for authorization'),
                Arg('--upload-policy-file', dest='upload_policy',
                    metavar='FILE', type=b64encoded_file_contents,
                    help='''file containing an upload policy to use for
                    authorization''')),
            Arg('--upload-policy-signature', metavar='SIGNATURE',
                help='''signature for the upload policy (required when an
                'upload policy is used)'''),
            Arg('--location', help='''location constraint of the destination
                bucket (default: inferred from s3-location-constraint in
                configuration, or otherwise none)'''),
            Arg('--retry', dest='retries', action='store_const', const=5,
                default=0, help='retry failed uploads up to 5 times')]

    def configure_bundle_upload_auth(self):
        """Validate upload-policy arguments and disable request signing
        when a policy is in use (the policy itself authorizes the upload).

        Raises ArgumentError when a policy is supplied without an access
        key id or a policy signature.
        """
        if self.args.get('upload_policy'):
            if not self.args.get('key_id'):
                raise ArgumentError('-I/--access-key-id is required when '
                                    'using an upload policy')
            if not self.args.get('upload_policy_signature'):
                raise ArgumentError('--upload-policy-signature is required '
                                    'when using an upload policy')
            self.auth = None

    def get_bundle_key_prefix(self):
        """Return 'BUCKET/PREFIX/' (prefix slash-terminated) for building
        the bundle's object key names."""
        (bucket, _, prefix) = self.args['bucket'].partition('/')
        if prefix and not prefix.endswith('/'):
            prefix += '/'
        return bucket + '/' + prefix

    def ensure_dest_bucket_exists(self):
        """Create the destination bucket if it does not already exist.

        Skipped when an upload policy is in use, since we then lack the
        credentials to sign our own requests.
        """
        if self.args.get('upload_policy'):
            # We won't have creds to sign our own requests
            self.log.info('using an upload policy; not verifying bucket '
                          'existence')
            return
        bucket = self.args['bucket'].split('/', 1)[0]
        try:
            req = CheckBucket(bucket=bucket, service=self.service,
                              config=self.config)
            req.main()
        except AWSError as err:
            if err.status_code == 404:
                # No such bucket
                self.log.info("creating bucket '%s'", bucket)
                req = CreateBucket(bucket=bucket,
                                   location=self.args.get('location'),
                                   config=self.config, service=self.service)
                req.main()
            else:
                raise
        # At this point we know we can at least see the bucket, but it's still
        # possible that we can't write to it with the desired key names.  So
        # many policies are in play here that it isn't worth trying to be
        # proactive about it.

    def upload_bundle_file(self, source, dest, show_progress=False,
                           **putobj_kwargs):
        """Upload one file: via POST when an upload policy is in use,
        via PUT (with optional retries) otherwise."""
        if self.args.get('upload_policy'):
            if show_progress:
                # PostObject does not yet support show_progress
                print source, 'uploading...'
            req = PostObject(source=source, dest=dest,
                             acl=self.args.get('acl') or 'aws-exec-read',
                             Policy=self.args['upload_policy'],
                             Signature=self.args['upload_policy_signature'],
                             AWSAccessKeyId=self.args['key_id'],
                             service=self.service, config=self.config,
                             **putobj_kwargs)
        else:
            req = PutObject(source=source, dest=dest,
                            acl=self.args.get('acl') or 'aws-exec-read',
                            retries=self.args.get('retries') or 0,
                            show_progress=show_progress,
                            service=self.service, config=self.config,
                            **putobj_kwargs)
        req.main()

    def upload_bundle_parts(self, partinfo_in_mpconn, key_prefix,
                            partinfo_out_mpconn=None, part_write_sem=None,
                            **putobj_kwargs):
        """Upload bundle parts received over a multiprocessing pipe until
        the sending end closes it.

        Each uploaded part is optionally forwarded to partinfo_out_mpconn,
        and part_write_sem (when given) is released after each upload so a
        producer throttled by it can continue.
        """
        try:
            while True:
                part = partinfo_in_mpconn.recv()
                dest = key_prefix + os.path.basename(part.filename)
                self.upload_bundle_file(part.filename, dest, **putobj_kwargs)
                if part_write_sem is not None:
                    # Allow something that's waiting for the upload to finish
                    # to continue
                    part_write_sem.release()
                if partinfo_out_mpconn is not None:
                    partinfo_out_mpconn.send(part)
        except EOFError:
            # Sender closed the pipe: all parts have been handled.
            return
        finally:
            partinfo_in_mpconn.close()
            if partinfo_out_mpconn is not None:
                partinfo_out_mpconn.close()
class BundleDownloadingMixin(object):
    """Mixin for request classes that fetch image bundles from a bucket."""

    # When fetching the manifest from the server there are two ways to get
    # its path:
    #   -m:  BUCKET[/PREFIX]/MANIFEST
    #   -p:  BUCKET[/PREFIX]/PREFIX.manifest.xml  (the PREFIXes are different)
    #
    # In all cases, after we obtain the manifest (whether it is local or not)
    # we choose key names for parts based on the file names in the manifest:
    #   BUCKET[/PREFIX]/PART
    ARGS = [Arg('-b', '--bucket', metavar='BUCKET[/PREFIX]', required=True,
                route_to=None, help='''the bucket that contains the bundle,
                with an optional path prefix (required)'''),
            MutuallyExclusiveArgList(
                Arg('-m', '--manifest', dest='manifest', route_to=None,
                    help='''the manifest's complete file name, not including
                    any path that may be specified using -b'''),
                Arg('-p', '--prefix', dest='manifest', route_to=None,
                    type=(lambda x: x + '.manifest.xml'),
                    help='''the portion of the manifest's file name that
                    precedes ".manifest.xml"'''),
                Arg('--local-manifest', dest='local_manifest', metavar='FILE',
                    route_to=None, help='''use a manifest on disk and ignore
                    any that appear on the server'''))
            .required()]

    def fetch_manifest(self, s3_service, privkey_filename=None):
        """Return the bundle's manifest, parsed either from a local file
        or from the server.

        Raises ValueError when the manifest does not exist on the server.
        """
        if self.args.get('local_manifest'):
            _assert_is_file(self.args['local_manifest'], 'manifest')
            return euca2ools.bundle.manifest.BundleManifest.read_from_file(
                self.args['local_manifest'], privkey_filename=privkey_filename)
        # It's on the server, so do things the hard way
        manifest_s3path = self.get_manifest_s3path()
        with tempfile.TemporaryFile() as manifest_tempfile:
            self.log.info('reading manifest from %s', manifest_s3path)
            req = GetObject(config=self.config, service=s3_service,
                            source=manifest_s3path, dest=manifest_tempfile)
            try:
                req.main()
            except AWSError as err:
                if err.status_code == 404:
                    self.log.debug('failed to fetch manifest', exc_info=True)
                    raise ValueError("manifest '{0}' does not exist on the "
                                     "server".format(manifest_s3path))
                raise
            manifest_tempfile.flush()
            manifest_tempfile.seek(0)
            return euca2ools.bundle.manifest.BundleManifest.read_from_fileobj(
                manifest_tempfile, privkey_filename=privkey_filename)

    def get_manifest_s3path(self):
        """Return the manifest's server-side path, or None when only a
        local manifest was supplied."""
        if self.args.get('manifest'):
            return '/'.join((self.args['bucket'], self.args['manifest']))
        else:
            # With a local manifest we can't divine the manifest's key name
            return None

    def download_bundle_to_dir(self, manifest, dest_dir, s3_service):
        """Download every bundle part (and the server-side manifest, when
        there is one) into dest_dir, verifying each part's SHA1.

        Returns the path of the downloaded manifest, or None when a local
        manifest was used.
        """
        parts = self.map_bundle_parts_to_s3paths(manifest)
        for part, part_s3path in parts:
            part.filename = os.path.join(dest_dir,
                                         os.path.basename(part_s3path))
            self.log.info('downloading part %s to %s',
                          part_s3path, part.filename)
            req = GetObject(
                config=self.config, service=s3_service,
                source=part_s3path, dest=part.filename,
                show_progress=self.args.get('show_progress', False))
            response = req.main()
            self.__check_part_sha1(part, part_s3path, response)
        manifest_s3path = self.get_manifest_s3path()
        if manifest_s3path:
            # Can't download a manifest if we're using a local one
            manifest_dest = os.path.join(dest_dir,
                                         os.path.basename(manifest_s3path))
            self.log.info('downloading manifest %s to %s',
                          manifest_s3path, manifest_dest)
            req = GetObject(
                config=self.config, service=s3_service,
                source=manifest_s3path, dest=manifest_dest,
                show_progress=self.args.get('show_progress', False))
            req.main()
            return manifest_dest
        return None

    def download_bundle_to_fileobj(self, manifest, fileobj, s3_service):
        """Download and concatenate all bundle parts into fileobj,
        verifying each part's SHA1.
        """
        # We can skip downloading the manifest since we're just writing all
        # parts to a file object.
        parts = self.map_bundle_parts_to_s3paths(manifest)
        for part, part_s3path in parts:
            self.log.info('downloading part %s', part_s3path)
            req = GetObject(
                config=self.config, service=s3_service,
                source=part_s3path, dest=fileobj,
                show_progress=self.args.get('show_progress', False))
            response = req.main()
            self.__check_part_sha1(part, part_s3path, response)

    def map_bundle_parts_to_s3paths(self, manifest):
        """Return a list of (part, s3path) pairs for the manifest's parts."""
        parts = []
        for part in manifest.image_parts:
            parts.append((part,
                          '/'.join((self.args['bucket'], part.filename))))
        return parts

    def __check_part_sha1(self, part, part_s3path, response):
        """Raise RuntimeError when a downloaded part's SHA1 digest does not
        match the digest recorded in the manifest."""
        if response[part_s3path]['sha1'] != part.hexdigest:
            self.log.error('rejecting download due to manifest SHA1 '
                           'mismatch (expected: %s, actual: %s)',
                           part.hexdigest, response[part_s3path]['sha1'])
            # Bug fix: the original message reused {0} (the digest) where
            # the file name belongs and lacked its closing parenthesis.
            raise RuntimeError('downloaded file {0} appears to be corrupt '
                               '(expected SHA1: {1}, actual: {2})'
                               .format(part_s3path, part.hexdigest,
                                       response[part_s3path]['sha1']))
def _assert_is_file(filename, filetype):
    """Raise ArgumentError unless *filename* names an existing regular file.

    filetype is a short human-readable label (e.g. 'manifest') used in the
    error message.
    """
    described = "{0} file '{1}'".format(filetype, filename)
    if not os.path.exists(filename):
        raise ArgumentError(described + " does not exist")
    if not os.path.isfile(filename):
        raise ArgumentError(described + " is not a file")
| |
import os
from unittest import TestCase
from ..database import delete_database, create_session, cleanup_database
from ..database.exceptions import ObjectAlreadyExists, ObjectNotFound
from ..database.exceptions import NonMatchingTaxonomyIds
from ..database.models import Protein, Interaction, Psimi, Pubmed
from ..database.validators import (
format_annotation,
format_annotations,
format_label,
format_labels,
validate_annotations,
validate_go_annotations,
validate_pfam_annotations,
validate_interpro_annotations,
validate_keywords,
validate_function,
validate_protein,
validate_source_and_target,
validate_same_taxonid,
validate_labels,
validate_boolean,
validate_joint_id,
validate_interaction_does_not_exist,
validate_training_holdout_is_labelled,
validate_gene_id,
validate_taxon_id,
validate_accession,
validate_description,
validate_accession_does_not_exist,
validate_uniprot_does_not_exist
)
base_path = os.path.dirname(__file__)
db_path = os.path.normpath("{}/databases/test.db".format(base_path))
class TestFormatAnnotation(TestCase):
    """Tests for the single-annotation formatter `format_annotation`."""

    def test_returns_none_if_input_is_none(self):
        self.assertIsNone(format_annotation(None))

    def test_returns_none_empty_string(self):
        self.assertIsNone(format_annotation(' '))

    def test_upper_cases_by_default(self):
        self.assertEqual(format_annotation('go111'), 'GO111')

    def test_lower_cases_if_upper_is_false(self):
        self.assertEqual(format_annotation('go111', upper=False), 'go111')

    def test_strips_white_space(self):
        self.assertEqual(format_annotation(' go111 '), 'GO111')

    def test_type_err_not_str_or_none(self):
        for bad_value in (1, []):
            with self.assertRaises(TypeError):
                format_annotation(bad_value)
class TestFormatAnnotations(TestCase):
    """Tests for the annotation-list formatter `format_annotations`."""

    def test_removes_duplicate_annotations(self):
        self.assertEqual(
            format_annotations("1,1", allow_duplicates=False), ["1"])

    def test_does_not_uppercase_when_upper_is_false(self):
        self.assertEqual(format_annotations("dog", upper=False), ["dog"])

    def test_allows_duplicate_annotations(self):
        self.assertEqual(
            format_annotations("1,1", allow_duplicates=True), ["1", "1"])

    def test_alpha_orders_annotations(self):
        self.assertEqual(format_annotations("2,1"), ["1", "2"])

    def test_uppercases_annotations(self):
        self.assertEqual(format_annotations("dog"), ["DOG"])

    def test_removes_blank(self):
        self.assertEqual(format_annotations("1,,"), ["1"])

    def test_strips_whitespace(self):
        self.assertEqual(format_annotations(" 1 "), ["1"])

    def test_splits_on_comma(self):
        # Only commas delimit entries; semicolons are kept verbatim.
        self.assertEqual(format_annotations("1;2, 3"), ["1;2", "3"])

    def test_returns_none_values_is_none(self):
        self.assertIsNone(format_annotations(None))

    def test_removes_none_and_empty_strings(self):
        self.assertIsNone(format_annotations([None, ' ']))

    def test_returns_none_no_valid_values(self):
        self.assertIsNone(format_annotations([' ']))

    def test_typeerror_not_list_set_none_or_str(self):
        with self.assertRaises(TypeError):
            format_annotations(1)
class TestFormatLabel(TestCase):
    """Tests for the single-label formatter `format_label`."""

    def test_returns_none_if_input_is_none(self):
        result = format_label(None)
        self.assertIsNone(result)

    def test_returns_none_empty_string(self):
        result = format_label(' ')
        self.assertIsNone(result)

    def test_cap_by_default(self):
        result = format_label('activation')
        self.assertEqual(result, 'Activation')

    def test_non_cap_if_cap_is_false(self):
        result = format_label('activation', capitalize=False)
        self.assertEqual(result, 'activation')

    def test_strips_white_space(self):
        result = format_label(' activation ')
        self.assertEqual(result, 'Activation')

    def test_converts_int_to_string(self):
        result = format_label(1)
        self.assertEqual(result, '1')

    def test_type_err_not_str_int_or_none(self):
        # Bug fix: this test previously called format_annotation, so
        # format_label's own type checking was never exercised.
        with self.assertRaises(TypeError):
            format_label(1.1)
        with self.assertRaises(TypeError):
            format_label([])
class TestFormatLabels(TestCase):
    """Tests for the label-list formatter `format_labels`.

    Note: an exact duplicate of test_removes_duplicates (formerly named
    test_removes_duplicate, identical body) was removed.
    """

    def test_removes_duplicates(self):
        value = "activation,activation"
        expected = ["Activation"]
        result = format_labels(value)
        self.assertEqual(result, expected)

    def test_does_not_cap_when_cap_is_false(self):
        value = "activation"
        expected = ["activation"]
        result = format_labels(value, capitalize=False)
        self.assertEqual(result, expected)

    def test_alpha_orders(self):
        value = "inhibition,activation"
        expected = ["Activation", 'Inhibition']
        result = format_labels(value)
        self.assertEqual(result, expected)

    def test_captializes(self):
        value = "activation"
        expected = ["Activation"]
        result = format_labels(value)
        self.assertEqual(result, expected)

    def test_removes_blank(self):
        value = "activation,,"
        expected = ["Activation"]
        result = format_labels(value)
        self.assertEqual(result, expected)

    def test_strips_whitespace(self):
        value = " activation "
        expected = ["Activation"]
        result = format_labels(value)
        self.assertEqual(result, expected)

    def test_splits_on_comma(self):
        # Only commas delimit entries; semicolons are kept verbatim.
        value = "activation;inhibition, acetylation"
        expected = ["Acetylation", "Activation;inhibition"]
        result = format_labels(value)
        self.assertEqual(result, expected)

    def test_returns_none_values_is_none(self):
        self.assertIsNone(format_labels(None))

    def test_removes_none_and_empty_strings(self):
        self.assertIsNone(format_labels([None, ' ']))

    def test_returns_none_no_valid_values(self):
        self.assertIsNone(format_labels([' ']))

    def test_typeerror_not_list_set_none_or_str(self):
        with self.assertRaises(TypeError):
            format_labels(1)
class TestValidateAnnotations(TestCase):
    """Tests for the generic `validate_annotations` dispatcher."""

    def test_valuerror_check_annotations_invalid_dbtype(self):
        with self.assertRaises(ValueError):
            validate_annotations(["IPR201", "GO:00001"], dbtype="HELLO")

    def test_check_annotations_ignores_falsey_values(self):
        for empty_input in ("", [], [' ']):
            self.assertIsNone(validate_annotations(empty_input, dbtype="GO"))

    def test_valueerror_not_go_annotations(self):
        with self.assertRaises(ValueError):
            validate_annotations(["IPR201", "GO:00001"], dbtype="GO")

    def test_valueerror_not_interpro_annotations(self):
        with self.assertRaises(ValueError):
            validate_annotations(["IPR201", "GO:00001"], dbtype="IPR")

    def test_valueerror_not_pfam_annotations(self):
        with self.assertRaises(ValueError):
            validate_annotations(["IPR201", "PF00001"], dbtype="PF")

    def test_check_go_annotations(self):
        self.assertEqual(
            ["GO:00001", "GO:00002"],
            validate_annotations(["GO:00002", "GO:00001"], dbtype="GO"))

    def test_check_interpro_annotations(self):
        self.assertEqual(
            ["IPR1", "IPR1"],
            validate_annotations(["IPR1", "ipr1"], dbtype="INTERPRO",
                                 upper=True, allow_duplicates=True))

    def test_check_pfam_annotations(self):
        self.assertEqual(
            ["PF1"],
            validate_annotations(["pf1", " ", None, 'PF1'], dbtype="PFAM",
                                 upper=True, allow_duplicates=False))
class TestValidateGO(TestCase):
    """Tests for `validate_go_annotations`."""

    def test_joins_splits_on_comma(self):
        for raw in (["GO:00002", "GO:00001"], "GO:00002,GO:00001"):
            self.assertEqual("GO:00001,GO:00002",
                             validate_go_annotations(raw))

    def test_returns_none_no_valid_annotations(self):
        for raw in ([None, '', ' '], None, " "):
            self.assertIsNone(validate_go_annotations(raw))
class TestValidatePfam(TestCase):
    """Tests for `validate_pfam_annotations`."""

    def test_joins_splits_on_comma(self):
        for raw in (["PF00002", "PF00001"], "PF00002,PF00001"):
            self.assertEqual("PF00001,PF00002",
                             validate_pfam_annotations(raw))

    def test_returns_none_no_valid_annotations(self):
        for raw in ([None, '', ' '], None, " "):
            self.assertIsNone(validate_pfam_annotations(raw))
class TestValidateInterpro(TestCase):
    """Tests for `validate_interpro_annotations`."""

    def test_joins_splits_on_comma(self):
        for raw in (["IPR00002", "IPR00001"], "IPR00001,IPR00002"):
            self.assertEqual("IPR00001,IPR00002",
                             validate_interpro_annotations(raw))

    def test_returns_none_no_valid_annotations(self):
        for raw in ([None, '', ' '], None, " "):
            self.assertIsNone(validate_interpro_annotations(raw))
class TestValidateKeywords(TestCase):
    """Tests for `validate_keywords`."""

    def test_typeerror_not_list_set_none_or_str(self):
        with self.assertRaises(TypeError):
            validate_keywords(1)

    def test_joins_on_comma(self):
        self.assertEqual("Cat,Dog", validate_keywords(["dog", "cat"]))

    def test_splits_on_comma(self):
        self.assertEqual("Cat,Dog", validate_keywords("dog,cat"))

    def test_removes_duplicates(self):
        self.assertEqual("Dog", validate_keywords(["dog", "dog"]))

    def test_removes_white_space(self):
        self.assertEqual("Dog", validate_keywords('dog, , '))

    def test_allows_duplicates(self):
        self.assertEqual("Dog,Dog",
                         validate_keywords('dog,dog', allow_duplicates=True))

    def test_returns_none_no_valid_keywords(self):
        for raw in ([None, '', ' '], None, " "):
            self.assertIsNone(validate_keywords(raw))
class TestValidateFunction(TestCase):
    """Tests for `validate_function`."""

    def test_typeerror_not_none_or_str(self):
        # Bug fix: both calls previously shared one assertRaises block,
        # so the second call was never reached.
        with self.assertRaises(TypeError):
            validate_function([])
        with self.assertRaises(TypeError):
            validate_function(1)

    def test_removes_white_space(self):
        result = validate_function('dog ')
        self.assertEqual("dog", result)

    def test_returns_none_no_valid_input(self):
        self.assertIsNone(validate_function(None))
        self.assertIsNone(validate_function(" "))
class TestValidateDescription(TestCase):
    """Tests for `validate_description`.

    Bug fix: this class previously called validate_function throughout
    (copy-paste from TestValidateFunction), leaving validate_description
    untested even though it is imported above.  Assumes
    validate_description mirrors validate_function's whitespace/None
    handling — confirm against the validator implementation.
    """

    def test_typeerror_not_none_or_str(self):
        # Separate assertRaises blocks: the second call was previously
        # unreachable inside a single block.
        with self.assertRaises(TypeError):
            validate_description([])
        with self.assertRaises(TypeError):
            validate_description(1)

    def test_removes_white_space(self):
        result = validate_description('dog ')
        self.assertEqual("dog", result)

    def test_returns_none_no_valid_input(self):
        self.assertIsNone(validate_description(None))
        self.assertIsNone(validate_description(" "))
class TestValidateLabels(TestCase):
    """Tests for `validate_labels`."""

    def test_joins_splits_on_comma(self):
        for raw in (["activation", "inhibition"], "activation,inhibition"):
            self.assertEqual("Activation,Inhibition", validate_labels(raw))

    def test_returns_none_no_valid_labels(self):
        for raw in ([None, '', ' '], None, " "):
            self.assertIsNone(validate_labels(raw))

    def test_returns_list_if_requesteed(self):
        self.assertEqual(
            validate_labels([None, '', ' '], return_list=True), [])
        self.assertEqual(
            validate_labels("Activation,Inhibition", return_list=True),
            ["Activation", "Inhibition"])
class TestValidateJointId(TestCase):
    """Tests for `validate_joint_id`."""

    def test_sorts_and_joins_on_comma(self):
        self.assertEqual(validate_joint_id(2, 1), '1,2')

    def test_typeerror_not_int(self):
        # Bug fix: both calls previously shared one assertRaises block,
        # so the second call was never reached.
        with self.assertRaises(TypeError):
            validate_joint_id([], 1)
        with self.assertRaises(TypeError):
            validate_joint_id(2, '1')
class TestTrainingHoldoutMustBeLabelled(TestCase):
    """Tests for `validate_training_holdout_is_labelled`."""

    def test_error_label_is_none_but_training_holdout_is_true(self):
        # A missing label combined with either flag must fail.
        for training, holdout in ((True, False), (False, True)):
            with self.assertRaises(ValueError):
                validate_training_holdout_is_labelled(
                    None, training, holdout)
        # Should not raise error
        validate_training_holdout_is_labelled(None, False, False)
class TestValidateGeneId(TestCase):
    """Tests for `validate_gene_id`."""

    def test_typeerror_not_none_or_str(self):
        # Bug fix: both calls previously shared one assertRaises block,
        # so the second call was never reached.
        with self.assertRaises(TypeError):
            validate_gene_id([])
        with self.assertRaises(TypeError):
            validate_gene_id(1)

    def test_removes_white_space(self):
        result = validate_gene_id(' EGFR ')
        self.assertEqual("EGFR", result)

    def test_returns_none_no_valid_input(self):
        self.assertIsNone(validate_gene_id(None))
        self.assertIsNone(validate_gene_id(" "))
class TestValidateTaxonId(TestCase):
    """Tests for `validate_taxon_id`."""

    def test_typeerror_not_int(self):
        # Bug fix: both calls previously shared one assertRaises block,
        # so the second call was never reached.
        with self.assertRaises(TypeError):
            validate_taxon_id([])
        with self.assertRaises(TypeError):
            validate_taxon_id('1')
class TestValidateBoolean(TestCase):
    """Tests for `validate_boolean`."""

    def test_typeerror_not_bool(self):
        # Bug fix: both calls previously shared one assertRaises block,
        # so the second call was never reached.
        with self.assertRaises(TypeError):
            validate_boolean([])
        with self.assertRaises(TypeError):
            validate_boolean('1')

    def test_none_converted_to_false(self):
        self.assertEqual(validate_boolean(None), False)
class TestValidateInteractionDoesNotExist(TestCase):
    """Integration tests for `validate_interaction_does_not_exist`
    against the on-disk test database."""

    def setUp(self):
        # Fresh session on the shared test database; save order matters
        # because the autoincrement ids (A -> 1, B -> 2) are asserted below.
        self.db_path = os.path.normpath(
            "{}/databases/test.db".format(base_path)
        )
        self.session, self.engine = create_session(self.db_path)
        delete_database(session=self.session)
        self.pa = Protein(uniprot_id='A', taxon_id=9606)
        self.pb = Protein(uniprot_id='B', taxon_id=9606)
        self.pa.save(self.session, commit=True)
        self.pb.save(self.session, commit=True)
        self.ia = Interaction(
            source=self.pa.id, target=self.pb.id
        )
        self.ia.save(self.session, commit=True)

    def tearDown(self):
        # Leave the shared database empty for the next test class.
        delete_database(session=self.session)
        cleanup_database(self.session, self.engine)

    def test_objectexist_error_if_exists(self):
        # The (1, 2) interaction exists; direction must not matter.
        with self.assertRaises(ObjectAlreadyExists):
            validate_interaction_does_not_exist(1, 2)
        with self.assertRaises(ObjectAlreadyExists):
            validate_interaction_does_not_exist(2, 1)
        validate_interaction_does_not_exist(1, 1)  # should not raise error
class TestValidateSameTaxonId(TestCase):
    """Integration tests for `validate_same_taxonid` against the on-disk
    test database."""

    def setUp(self):
        # Fresh session; 'A' and 'B' get autoincrement ids 1 and 2 and
        # deliberately different taxonomy ids (9606 vs 0).
        self.db_path = os.path.normpath(
            "{}/databases/test.db".format(base_path)
        )
        self.session, self.engine = create_session(self.db_path)
        delete_database(session=self.session)
        self.pa = Protein(uniprot_id='A', taxon_id=9606)
        self.pb = Protein(uniprot_id='B', taxon_id=0)
        self.pa.save(self.session, commit=True)
        self.pb.save(self.session, commit=True)

    def tearDown(self):
        # Leave the shared database empty for the next test class.
        delete_database(session=self.session)
        cleanup_database(self.session, self.engine)

    def test_error_not_same_taxon(self):
        # Mixed argument forms (accession, id, instance) must all fail
        # when the two proteins have different taxonomy ids.
        with self.assertRaises(NonMatchingTaxonomyIds):
            validate_same_taxonid('A', 'B')
        with self.assertRaises(NonMatchingTaxonomyIds):
            validate_same_taxonid(1, 2)
        with self.assertRaises(NonMatchingTaxonomyIds):
            validate_same_taxonid(1, self.pb)
        with self.assertRaises(NonMatchingTaxonomyIds):
            validate_same_taxonid(self.pa, 'B')

    def test_returns_taxon_id_if_valid(self):
        result = validate_same_taxonid('A', 'A')
        self.assertEqual(result, 9606)

    def test_returns_taxon_id_if_valid_proteins_passed(self):
        result = validate_same_taxonid(self.pa, self.pa)
        self.assertEqual(result, 9606)
class TestValidateSourceAndTarget(TestCase):
    """Integration tests for `validate_source_and_target` against the
    on-disk test database."""

    def setUp(self):
        # Fresh session; save order gives 'A' id 1 and 'B' id 2, which
        # the assertions below rely on.
        self.db_path = os.path.normpath(
            "{}/databases/test.db".format(base_path)
        )
        self.session, self.engine = create_session(self.db_path)
        delete_database(session=self.session)
        self.pa = Protein(uniprot_id='A', taxon_id=9606)
        self.pb = Protein(uniprot_id='B', taxon_id=0)
        self.pa.save(self.session, commit=True)
        self.pb.save(self.session, commit=True)

    def tearDown(self):
        # Leave the shared database empty for the next test class.
        delete_database(session=self.session)
        cleanup_database(self.session, self.engine)

    def test_returns_ids_from_accession(self):
        s, t = validate_source_and_target('A', 'B')
        self.assertEqual(s, 1)
        self.assertEqual(t, 2)

    def test_returns_ids_from_proteinn(self):
        s, t = validate_source_and_target(self.pa, self.pb)
        self.assertEqual(s, 1)
        self.assertEqual(t, 2)

    def test_returns_ids_from_int(self):
        s, t = validate_source_and_target(1, 2)
        self.assertEqual(s, 1)
        self.assertEqual(t, 2)

    def test_raise_error_not_existing(self):
        # Unknown integer id and unknown accession both fail.
        with self.assertRaises(ObjectNotFound):
            validate_source_and_target(1, 0)
        with self.assertRaises(ObjectNotFound):
            validate_source_and_target('A', 'C')
class TestValidateProtein(TestCase):
    """Integration tests for `validate_protein` against the on-disk test
    database."""

    def setUp(self):
        # Fresh session; save order gives 'A' id 1 and 'B' id 2, which
        # the assertions below rely on.
        self.db_path = os.path.normpath(
            "{}/databases/test.db".format(base_path)
        )
        self.session, self.engine = create_session(self.db_path)
        delete_database(session=self.session)
        self.pa = Protein(uniprot_id='A', taxon_id=9606)
        self.pb = Protein(uniprot_id='B', taxon_id=0)
        self.pa.save(self.session, commit=True)
        self.pb.save(self.session, commit=True)

    def tearDown(self):
        # Leave the shared database empty for the next test class.
        delete_database(session=self.session)
        cleanup_database(self.session, self.engine)

    def test_typeerr_not_protein_str_or_int(self):
        with self.assertRaises(TypeError):
            validate_protein(None)

    def test_object_not_found_error_non_existing(self):
        with self.assertRaises(ObjectNotFound):
            validate_protein('C')
        with self.assertRaises(ObjectNotFound):
            validate_protein(0)

    def test_value_error_protein_with_none_id(self):
        # An instance whose id has been cleared cannot be validated.
        with self.assertRaises(ValueError):
            self.pa.id = None
            validate_protein(self.pa)

    def test_returns_id(self):
        # Instance, accession, and integer id inputs all map to the id.
        result = validate_protein(self.pa)
        self.assertEqual(result, 1)
        result = validate_protein('B')
        self.assertEqual(result, 2)
        result = validate_protein(1)
        self.assertEqual(result, 1)

    def test_returns_instance_if_true(self):
        result = validate_protein(1, return_instance=True)
        self.assertEqual(result, self.pa)
class TestValidateAndCheckUniprotIdAndAccession(TestCase):
    """Integration tests for `validate_accession` against the on-disk
    test database, covering Protein, Pubmed, and Psimi lookups."""

    def setUp(self):
        # Fresh session populated with one Protein ('A'), one Pubmed
        # ('PM1'), and one Psimi ('PSI1') for existence checks below.
        self.db_path = os.path.normpath(
            "{}/databases/test.db".format(base_path)
        )
        self.session, self.engine = create_session(self.db_path)
        delete_database(session=self.session)
        self.pa = Protein(uniprot_id='A', taxon_id=9606)
        self.pubmed = Pubmed(accession='PM1')
        self.psimi = Psimi(accession='PSI1', description="hello")
        self.pa.save(self.session, commit=True)
        self.pubmed.save(self.session, commit=True)
        self.psimi.save(self.session, commit=True)

    def tearDown(self):
        # Leave the shared database empty for the next test class.
        delete_database(session=self.session)
        cleanup_database(self.session, self.engine)

    def test_type_err_not_string(self):
        with self.assertRaises(TypeError):
            validate_accession(None)
        with self.assertRaises(TypeError):
            validate_accession(1)

    def test_value_err_empty_string(self):
        with self.assertRaises(ValueError):
            validate_accession(" ")

    def test_object_exists_err_if_check_exists_true_protein(self):
        # 'r' (not upper-cased) does not collide; 'a' upper-cased to 'A' does.
        validate_accession("r", Protein, upper=False, check_exists=True)
        with self.assertRaises(ObjectAlreadyExists):
            validate_accession("a", Protein, upper=True, check_exists=True)

    def test_no_object_exists_err_if_unirpot_does_not_exist_protein(self):
        value = validate_accession("C", Protein, check_exists=True)
        self.assertEqual(value, "C")

    def test_object_exists_err_if_check_exists_true_pubmed(self):
        with self.assertRaises(ObjectAlreadyExists):
            validate_accession("PM1", Pubmed, check_exists=True)

    def test_no_object_exists_err_if_unirpto_does_not_exist_pubmed(self):
        value = validate_accession("PM2", Pubmed, check_exists=True)
        self.assertEqual(value, "PM2")

    def test_object_exists_err_if_check_exists_true_psimi(self):
        with self.assertRaises(ObjectAlreadyExists):
            validate_accession("PSI1", Psimi, check_exists=True)

    def test_no_object_exists_err_if_unirpto_does_not_exist_psimi(self):
        value = validate_accession("PSI2", Psimi, check_exists=True)
        self.assertEqual(value, "PSI2")

    def test_no_object_exists_err_if_check_exists_false(self):
        value = validate_accession("A", Protein, check_exists=False)
        self.assertEqual(value, 'A')

    def test_strip_white_space(self):
        value = validate_accession(" a ", Protein, check_exists=False)
        self.assertEqual(value, "A")

    def test_strip_does_not_upper_case_if_upper_is_false(self):
        value = validate_accession(
            "a", Protein, upper=False, check_exists=False)
        self.assertEqual(value, "a")
class TestUniProtAndAccessionDoesNotExist(TestCase):
    """Integration tests for `validate_accession_does_not_exist` and
    `validate_uniprot_does_not_exist` against the on-disk test database."""

    def setUp(self):
        # Fresh session populated with one Protein ('A'), one Pubmed
        # ('PM1'), and one Psimi ('PSI1') for collision checks below.
        self.db_path = os.path.normpath(
            "{}/databases/test.db".format(base_path)
        )
        self.session, self.engine = create_session(self.db_path)
        delete_database(session=self.session)
        self.pa = Protein(uniprot_id='A', taxon_id=9606)
        self.pubmed = Pubmed(accession='PM1')
        self.psimi = Psimi(accession='PSI1', description="hello")
        self.pa.save(self.session, commit=True)
        self.pubmed.save(self.session, commit=True)
        self.psimi.save(self.session, commit=True)

    def tearDown(self):
        # Leave the shared database empty for the next test class.
        delete_database(session=self.session)
        cleanup_database(self.session, self.engine)

    def test_attr_err_class_doesnt_have_accession_attr(self):
        with self.assertRaises(AttributeError):
            validate_accession_does_not_exist(None, None)

    def test_err_doesnt_have_accession_attr_pubmed(self):
        # Unknown accession passes; existing one raises.
        validate_accession_does_not_exist("PM2", Pubmed)
        with self.assertRaises(ObjectAlreadyExists):
            validate_accession_does_not_exist("PM1", Pubmed)

    def test_err_doesnt_have_accession_attr_psimi(self):
        validate_accession_does_not_exist("PSI2", Psimi)
        with self.assertRaises(ObjectAlreadyExists):
            validate_accession_does_not_exist("PSI1", Psimi)

    def test_attr_err_class_doesnt_have_uniprot_id_attr(self):
        with self.assertRaises(AttributeError):
            validate_uniprot_does_not_exist(None, None)

    def test_err_doesnt_have_accession_attr_protein(self):
        # Unknown uniprot id passes; existing one raises.
        validate_uniprot_does_not_exist("B", Protein)
        with self.assertRaises(ObjectAlreadyExists):
            validate_uniprot_does_not_exist("A", Protein)
| |
#!/usr/bin/env python
"""
Density profiles need to be set by:
1) rhos and rs
2) Integrated J-factor at a given radius and scale radius
3) Integrated J-factor only (need to approximate scale radius)
It would be good to generalize profile shapes, i.e., following the prescription of Zhou (1996).
Careful `rhos` is a normalization parameter and is *NOT* the same as rho(rs).
"""
from collections import OrderedDict as odict
import numpy as np
import scipy.special as spfn
from pymodeler.model import Model
from pymodeler.parameter import Parameter, Derived
from dmsky.utils.units import Units
class DensityProfile(Model):
"""An abstract base class for DM density profiles
At a minimum sub-classes need to implement the self._rho(r) method
to compute the density as a function of radius from the center of the halo
"""
_params = odict([
    # Scale radius of the profile.
    ('rs', Parameter(default=1.0)),
    # Density normalization; NOT the same as rho(rs) for every profile.
    ('rhos', Parameter(default=1.0)),
    # Radii below rmin evaluate to rho(rmin); 0 disables the clamp.
    ('rmin', Parameter(default=0.0)),
    # Density is zero beyond rmax.
    ('rmax', Parameter(default=np.inf)),
    # Density values are capped at rhomax.
    ('rhomax', Parameter(default=np.inf)),
    ('covar', Derived(dtype=np.ndarray, help='Covariance matrix for parameters')),
])
def __call__(self, r):
    """Evaluate the profile density at radii *r*.

    Parameters
    ----------
    r : `numpy.array` or float
        The radii

    Returns
    -------
    values : `numpy.array`
        Densities, same shape as the input radii
    """
    # Calling the profile object is shorthand for calling rho().
    return self.rho(r)
@property
def deriv_params(self):
    """Names of the parameters that derivatives are taken w.r.t."""
    return ["rs", "rhos"]
def rho(self, r):
    """Return the density for given radii.

    The raw density from ``self._rho`` is post-processed with the model's
    clamping parameters: radii below ``rmin`` are evaluated at ``rmin``,
    the density is zero beyond ``rmax``, and values are capped at
    ``rhomax`` (a clamp value of 0 / inf disables the corresponding step).

    Parameters
    ----------
    r : `numpy.array` or float
        The radii

    Returns
    -------
    values : `numpy.array` or float
        Density values, same shape as the input radii (a Python scalar
        for scalar input)
    """
    scalar = np.isscalar(r)
    r = np.atleast_1d(r)
    rho = self._rho(r)
    if self.rmin:
        rho[r < self.rmin] = self._rho(self.rmin)
    if self.rmax:
        rho[r > self.rmax] = 0
    if self.rhomax:
        rho[rho > self.rhomax] = self.rhomax
    if scalar:
        # Bug fix: np.asscalar() was removed in NumPy 1.23;
        # ndarray.item() is its direct equivalent for a size-1 array.
        return rho.item()
    return rho
def rho_deriv(self, r, paramNames):
    """Return the derivatives of the density as a function of radius,
    w.r.t. a list of parameters

    Uses forward finite differences with a step of 0.1% of each
    parameter's current value, temporarily mutating each Parameter and
    restoring it afterwards.

    Parameters
    ----------
    r : `numpy.array` or float
        The radii

    paramNames : list
        The names of the parameters to differentiation w.r.t.

    Returns
    -------
    matrix : `numpy.array`
        An n x m array, where:
        n is the number of radii
        m is the number of parameters
    """
    initParVals = np.array([self.__getattr__(pName) for pName in paramNames])
    # NOTE(review): a parameter whose current value is 0 yields a zero
    # step and a 0/0 division below — TODO confirm callers never hit this.
    deltaParVals = initParVals * 0.001
    init_r = self._rho(r)
    mask = np.invert(np.isfinite(init_r))
    if mask.any():
        raise ValueError('Tried to get dervatives for infinte rho')
    derivs = []
    # loop over parameters and take the numerical derivatives
    for initPar, deltaPar, parName in zip(initParVals, deltaParVals, paramNames):
        par = self.getp(parName)
        newParVal = initPar + deltaPar
        par.set_value(newParVal)
        new_r = self._rho(r)
        dr_dp = (new_r - init_r) / (newParVal - initPar)
        derivs.append(dr_dp)
        # Restore the parameter before moving to the next one.
        par.set_value(initPar)
    ret = np.vstack(derivs)
    return ret
def rho_uncertainty(self, r):
"""Calculate the uncertainty of the density at given radii
Parameters
----------
r : `numpy.array` or float
The radii
Returns
-------
values : `numpy.array`
Return values, same shape as the input radii
"""
cov_mat = self.covar
if np.isscalar(r):
nr = 1
deriv_vect = np.matrix(self.rho_deriv(r, self.deriv_params))
err2 = (deriv_vect.T * cov_mat * deriv_vect)[0, 0]
else:
nr = len(r)
err2 = np.zeros((nr))
for i, r_i in enumerate(r):
deriv_vect = np.matrix(self.rho_deriv(r_i, self.deriv_params))
err2[i] = deriv_vect * cov_mat * deriv_vect.T
return np.sqrt(err2)
def _rho(self, r):
"""Internal function for sub-class to return the density at given radii
"""
raise NotImplementedError("%s._rho not implemented" % (self.__class__.__name__))
def mass(self, r=None):
"""Compute the mass of the object out to a particular radius.
Parameters
----------
r : `numpy.array` or float
The radii
Returns
-------
values : `numpy.array`
Return values, same shape as the input radii
"""
if r is None:
r = self.rmax
scalar = np.isscalar(r)
r = np.atleast_1d(r)
mass = self._mass(r)
if scalar:
return mass[0]
return mass
def _mass(self, r):
"""Internal function for sub-class to compute the mass at given radii
"""
raise NotImplementedError("%s._mass not implemented" % (self.__class__.__name__))
    def set_rho_r(self, rho, r):
        """Fix the density normalization at a given radius.

        Parameters
        ----------
        rho : float
            The normalization density
        r : float
            The corresponding radius
        """
        rhor = self._rho(r)
        rhos = self.getp('rhos')
        # Rescale the 'rhos' Parameter so that rho(r) == rho.
        # NOTE(review): this relies on the Parameter object supporting
        # in-place multiplication (__imul__); if getp() returned a plain
        # float this would only rebind the local name -- confirm.
        rhos *= (rho / rhor)
    def set_mvir_c(self, mvir, c):
        """Fix the normalization so the mass inside the virial radius is mvir.

        Parameters
        ----------
        mvir : float
            The virial mass (it is used as a mass in the rvir formula
            below, not a radius as the previous docstring stated)
        c : float
            Concentration, c = rvir / rs
        """
        # Hard-coded cosmology: rhoc is presumably the critical density in
        # g/cm^3, 177.7 the virial overdensity, and 0.27 the matter
        # fraction -- TODO confirm against the rest of dmsky.
        rhoc = 9.9E-30 * Units.g_cm3
        rvir = np.power(mvir * 3.0 / (177.7 * 4 * np.pi * rhoc * 0.27), 1. / 3.)
        rs_val = rvir / c
        self.setp('rs', value=rs_val)
        mrvir = self.mass(rvir)
        # Rescale 'rhos' in place so that mass(rvir) == mvir.
        rhos = self.getp('rhos')
        rhos *= mvir / mrvir
def _covar(self):
"""Default implementation of covariance matrix,
This just uses the parameter errors and ignores the off-diagonal terms
Returns
-------
covs : `numpy.array`
n x n matrix, where n is the number of differentiable parameters
"""
npar = len(self.deriv_params)
m = np.matrix(np.zeros((npar, npar)))
for i, pname in enumerate(self.deriv_params):
par_err = self.getp(pname).symmetric_error
m[i, i] = par_err * par_err
return m
def _cache(self, name=None):
"""Cache any `Derived` paramters that are slow to compute
"""
pass
class UniformProfile(DensityProfile):
    """ Uniform spherical profile
    rho(r) = rhos for r <= rs
    rho(r) = 0 otherwise
    """

    def _rho(self, r):
        """Density: constant rhos inside rs, zero outside."""
        inside = (r / self.rs) <= 1
        return np.where(inside, self.rhos, 0.0)

    def _mass(self, r):
        """Enclosed mass: grows as r**3 inside rs, then saturates."""
        enclosed = np.where(r < self.rs, r**3, self.rs**3)
        return 4 * np.pi / 3 * self.rhos * enclosed
class IsothermalProfile(DensityProfile):
    """ Non-Singular Isothermal Profile:
    Begeman et al. MNRAS 249, 523 (1991)
    http://adsabs.harvard.edu/full/1991MNRAS.249..523B
    rho(r) = rhos/(1+(r/rs))**2
    """

    def _rho(self, r):
        """Internal function for sub-class to return the density at given radii
        """
        x = r / self.rs
        return self.rhos * (1 + x)**(-2)

    def _mass(self, r):
        """Internal function for sub-class to compute the mass at given radii

        M(r) = 4 pi rhos rs**3 * Int[x'**2 / (1+x')**2, {x', 0, x}]
             = 4 pi rhos rs**3 * (x - 2*log(1+x) + 1 - 1/(1+x))

        Bug fix: the previous implementation dropped the
        4*pi*rhos*rs**3 normalization and the integration constant,
        so it was dimensionless and negative at small radii
        (it returned -1 at r = 0 instead of 0).
        """
        x = r / self.rs
        return 4 * np.pi * self.rhos * self.rs**3 * \
            (x - 2 * np.log(1 + x) + 1 - (1 + x)**-1)
class BurkertProfile(DensityProfile):
    """Burkert ApJ 447, L25 (1995) [Eqn. 2]
    http://arxiv.org/abs/astro-ph/9504041
    rho(r) = rho0 * r0**3 / ( (r + r0)*(r**2+r0**2) )
    ==>
    rho(r) = rhos / ( (1+r/rs)*(1+(r/rs)**2) )
    """

    def _rho(self, r):
        """Internal function for sub-class to return the density at given radii
        """
        x = r / self.rs
        return self.rhos * ((1 + x) * (1 + x**2))**(-1)

    def _mass(self, r):
        """Compute the mass out to given radii analytically

        M(r) = pi * rhos * rs**3 *
               (log(x**2 + 1) + 2*log(x + 1) - 2*arctan(x))

        Bug fix: the previous implementation omitted the rs**3 factor,
        so the result did not carry the dimensions of a mass (compare
        NFWProfile._mass, which includes its rs**3).
        """
        x = r / self.rs
        return np.pi * self.rhos * self.rs**3 * \
            (np.log(x**2 + 1) + 2 * np.log(x + 1) - 2 * np.arctan(x))
class NFWProfile(DensityProfile):
    """Navarro, Frenk, and White, ApJ 462, 563 (1996)
    http://arxiv.org/abs/astro-ph/9508025
    rho(r) = rhos / ((r/rs) * (1+r/rs)**2)
    """

    def _rho(self, r):
        """Density at the given radii."""
        x = r / self.rs
        return self.rhos * x**-1 * (1 + x)**-2

    def _mass(self, r):
        """Analytic enclosed mass: 4 pi rhos rs**3 (ln(1+x) - x/(1+x))."""
        x = r / self.rs
        return 4 * np.pi * self.rhos * self.rs**3 * (np.log(1 + x) - x / (1 + x))

    def jvalue_fast(self, r=None):
        """Fast integrated J-factor computation"""
        if r is None:
            r = self.rmax
        x = r / self.rs
        return (4 * np.pi / 3.) * self.rhos**2 * self.rs**3 * (1 - (1 + x)**-3)
class EinastoProfile(DensityProfile):
    """ Einasto profile
    Einasto Trudy Inst. Astrofiz. Alma-Ata 5, 87 (1965) (Russian) [Eqn. 4]
    http://adsabs.harvard.edu/abs/1965TrAlm...5...87E
    rho(r) = rhos*exp(-2*((r/rs)**alpha-1)/alpha)
    """
    _params = odict(
        list(DensityProfile._params.items()) +
        [
            ('alpha', Parameter(default=0.17)),
        ])

    @property
    def deriv_params(self):
        """Names of the parameters that support numerical derivatives."""
        return ["rs", "rhos", "alpha"]

    def _rho(self, r):
        """Density at the given radii."""
        x = r / self.rs
        return self.rhos * np.exp(-2. * self.alpha**-1 * (x**(self.alpha) - 1))

    def _mass(self, r):
        """Analytic enclosed mass, written with the regularized lower
        incomplete gamma function (scipy.special.gammainc)."""
        x = r / self.rs
        ainv = self.alpha**-1
        gamma = spfn.gamma(3. / self.alpha)
        gammainc = spfn.gammainc(3. * ainv, (2. * ainv * x**self.alpha))
        return 4 * np.pi * self.rhos * self.rs**3 * ainv * \
            np.exp(2. * ainv) * \
            np.power(2. * ainv, -3. * ainv) * \
            gamma * gammainc
class GNFWProfile(DensityProfile):
    """ Generalized NFW Profile
    Strigari et al. ApJ 678, 614 (2008) [Eqn. 3]
    http://arxiv.org/abs/0709.1510
    rho(r) = rhos / ( (r/rs)**gamma * (1+r/rs)**(3-gamma))
    """
    _params = odict(
        list(DensityProfile._params.items()) +
        [
            ('gamma', Parameter(default=1.)),
        ])

    @property
    def deriv_params(self):
        """Names of the parameters that support numerical derivatives."""
        return ["rs", "rhos", "gamma"]

    def _rho(self, r):
        """Density at the given radii."""
        x = r / self.rs
        return self.rhos * x**(-self.gamma) * (1 + x)**(self.gamma - 3)

    def _mass(self, r):
        """No closed form is implemented for the generalized profile."""
        raise NotImplementedError("No analytic function for mass of GNFWProfile")
class ZhouProfile(DensityProfile):
    """Generalized double power-law models
    Zhou MNRAS 278, 488 (1996) [Eqn. 1]
    http://arxiv.org/abs/astro-ph/9509122
    rho(r) = C * (r/rs)**-gamma * (1 + (r/rs)**1/alpha))**-(beta-gamma)*alpha
    C = 4 * rhos
    also see...
    Zhou MNRAS 287, 525 (1997) [Eqn. 2]
    http://arxiv.org/abs/astro-ph/9605029
    Strigari et al., Nature 454 (2008) [Eqn. 8]
    http://arxiv.org/abs/0808.3772
    """
    _params = odict(
        list(DensityProfile._params.items()) +
        [
            ('alpha', Parameter(default=1.)),
            ('beta', Parameter(default=3.)),
            ('gamma', Parameter(default=1.)),
        ])

    @property
    def deriv_params(self):
        """Names of the parameters that support numerical derivatives."""
        return ["rs", "rhos", "alpha", "beta", "gamma"]

    def _rho(self, r):
        """Density at the given radii."""
        x = r / self.rs
        # Inner slope -gamma, turning over to -beta at x ~ 1 with
        # transition sharpness set by alpha.
        return self.rhos * x**-self.gamma * \
            (1 + x**(1 / self.alpha))**(-(self.beta - self.gamma) * self.alpha)

    def _mass(self, r):
        """No closed form is implemented for the double power law."""
        raise NotImplementedError("No analytic function for mass of ZhouProfile")
# Short aliases for the profile classes, so they can be looked up by
# their common names in this module's namespace (e.g. by factory()).
Uniform = UniformProfile
Isothermal = IsothermalProfile
Burkert = BurkertProfile
NFW = NFWProfile
Einasto = EinastoProfile
gNFW = GNFWProfile
Zhou = ZhouProfile
def scale_list(l, scale_value):
    """Multiply every element of a list by the same value.

    The list is modified in place.

    Parameters
    ----------
    l : list
    scale_value : float

    Returns
    -------
    l : list
        The same list object, with scaled values.
    """
    l[:] = [item * scale_value for item in l]
    return l
def scale_dict(d, scale_value):
    """Multiply every value of a dict by the same value.

    The dictionary is modified in place; list values are scaled
    element-wise via scale_list().

    Parameters
    ----------
    d : dict
    scale_value : float

    Returns
    -------
    d : dict
        The same dict object, with scaled values.
    """
    for key in d:
        current = d[key]
        if isinstance(current, list):
            d[key] = scale_list(current, scale_value)
        else:
            d[key] = current * scale_value
    return d
def scale_param(p, scale_value):
    """Generic function to scale parameters.

    Dispatches on the parameter's container type: dicts and lists are
    scaled in place (via scale_dict / scale_list); anything else is
    scaled by plain multiplication.

    Parameters
    ----------
    p : dict or list or `Parameter`
    scale_value : float

    Returns
    -------
    p : dict or list or `Parameter`
    """
    if isinstance(p, list):
        return scale_list(p, scale_value)
    if isinstance(p, dict):
        return scale_dict(p, scale_value)
    return p * scale_value
def scale_dict_param(d, k, scale_value, default_value):
    """Scale a parameter in a dict, or assign a scaled default value.

    Parameters
    ----------
    d : dict
        Input dictionary, modified in place
    k : str
        Key of `Parameter` to modify
    scale_value : float
        Value to scale existing `Parameter` by
    default_value : float
        Value to assign (scaled) if k is not in d
    """
    if k in d:
        d[k] = scale_param(d[k], scale_value)
    else:
        d[k] = scale_value * default_value
def factory(ptype, **kwargs):
    """Factory method to build `DensityProfile` objects.
    Keyword arguments are passed to class c'tor

    Parameters
    ----------
    ptype : str
        Density profile type

    Returns
    -------
    profile : `DensityProfile`
        Newly created object
    """
    import dmsky.factory
    # Work on a copy so the caller's kwargs are not mutated.
    prof_copy = kwargs.copy()
    # An optional 'units' keyword selects the input unit system; it is
    # split on the last underscore into a density part and a distance
    # part, each presumably naming a conversion factor on Units --
    # TODO confirm the expected format against the Units class.
    units = prof_copy.pop('units', None)
    if units:
        density, distance = units.rsplit('_', 1)
        scale_density = getattr(Units, density)
        scale_distance = getattr(Units, distance)
        # Convert the dimensional parameters (or their defaults) into
        # the internal unit system before construction.
        scale_dict_param(prof_copy, 'rhos', scale_density, DensityProfile._params['rhos'].default)
        scale_dict_param(prof_copy, 'rs', scale_distance, DensityProfile._params['rs'].default)
        scale_dict_param(prof_copy, 'rmin', scale_distance, DensityProfile._params['rmin'].default)
        scale_dict_param(prof_copy, 'rmax', scale_distance, DensityProfile._params['rmax'].default)
        scale_dict_param(prof_copy, 'rhomax', scale_density,
                         DensityProfile._params['rhomax'].default)
    return dmsky.factory.factory(ptype, module=__name__, **prof_copy)
| |
from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation and Python-value adaptation.

    Overrides the portable defaults in BaseDatabaseOperations with the
    MySQL dialect: backtick identifier quoting, DATE_FORMAT-based
    date/time truncation, TRUNCATE-based flushing, etc.
    """
    compiler_module = "django.db.backends.mysql.compiler"
    # MySQL stores positive fields as UNSIGNED ints.
    integer_field_ranges = dict(
        BaseDatabaseOperations.integer_field_ranges,
        PositiveSmallIntegerField=(0, 65535),
        PositiveIntegerField=(0, 4294967295),
    )
    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the named date part from field_name."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date to year or month precision;
        any other lookup_type falls back to a plain DATE() cast."""
        fields = {
            'year': '%%Y-01-01',
            'month': '%%Y-%%m-01',
        } # Use double percents to escape.
        if lookup_type in fields:
            format_str = fields[lookup_type]
            return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str)
        else:
            return "DATE(%s)" % (field_name)
    def _convert_field_to_tz(self, field_name, tzname):
        """Wrap field_name in CONVERT_TZ when USE_TZ is on; returns the
        (possibly rewritten) SQL fragment and its parameters."""
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        return field_name, params
    def datetime_cast_date_sql(self, field_name, tzname):
        """Cast a datetime column to a date in the given time zone."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = "DATE(%s)" % field_name
        return sql, params
    def datetime_cast_time_sql(self, field_name, tzname):
        """Cast a datetime column to a time in the given time zone."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = "TIME(%s)" % field_name
        return sql, params
    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Extract a date part from a datetime, tz-converting first."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = self.date_extract_sql(lookup_type, field_name)
        return sql, params
    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Truncate a datetime to lookup_type precision by rebuilding it
        with DATE_FORMAT, padding the finer parts with their zero values."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup_type: pass the column through untruncated.
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql, params
    def time_trunc_sql(self, lookup_type, field_name):
        """Truncate a time to hour/minute/second precision; any other
        lookup_type falls back to a plain TIME() cast."""
        fields = {
            'hour': '%%H:00:00',
            'minute': '%%H:%%i:00',
            'second': '%%H:%%i:%%s',
        } # Use double percents to escape.
        if lookup_type in fields:
            format_str = fields[lookup_type]
            return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str)
        else:
            return "TIME(%s)" % (field_name)
    def date_interval_sql(self, timedelta):
        """Render a Python timedelta as a MySQL DAY_MICROSECOND interval."""
        return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
            timedelta.days, timedelta.seconds, timedelta.microseconds), []
    def format_for_duration_arithmetic(self, sql):
        """Wrap a microsecond count so it can be added to a datetime."""
        if self.connection.features.supports_microsecond_precision:
            return 'INTERVAL %s MICROSECOND' % sql
        else:
            # Older MySQL: drop sub-second precision.
            return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql
    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return [(None, ("NULL", [], False))]
    def fulltext_search_sql(self, field_name):
        # RemovedInDjango20Warning
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
    def last_executed_query(self, cursor, sql, params):
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')
    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615
    def quote_name(self, name):
        """Quote an identifier with backticks (idempotently)."""
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name
    def random_function_sql(self):
        return 'RAND()'
    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements that empty the given tables, disabling FK
        checks around the TRUNCATEs so ordering does not matter."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
    def validate_autopk_value(self, value):
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value
    def adapt_datetimefield_value(self, value):
        """Convert a Python datetime to a string MySQL can store."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
        if not self.connection.features.supports_microsecond_precision:
            value = value.replace(microsecond=0)
        return six.text_type(value)
    def adapt_timefield_value(self, value):
        """Convert a Python time to a string MySQL can store."""
        if value is None:
            return None
        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")
        return six.text_type(value)
    def max_name_length(self):
        # MySQL identifier length limit.
        return 64
    def bulk_insert_sql(self, fields, placeholder_rows):
        """Render the VALUES clause for a multi-row INSERT."""
        placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
        values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
        return "VALUES " + values_sql
    def combine_expression(self, connector, sub_expressions):
        """
        MySQL requires special cases for ^ operators in query expressions
        """
        if connector == '^':
            return 'POW(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
    def get_db_converters(self, expression):
        """Attach MySQL-specific converters for field types whose raw
        database values differ from their Python representations."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        elif internal_type in ['BooleanField', 'NullBooleanField']:
            converters.append(self.convert_booleanfield_value)
        elif internal_type == 'DateTimeField':
            converters.append(self.convert_datetimefield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        return converters
    def convert_textfield_value(self, value, expression, connection, context):
        if value is not None:
            value = force_text(value)
        return value
    def convert_booleanfield_value(self, value, expression, connection, context):
        # MySQL returns 0/1 for booleans; map them back to bool.
        if value in (0, 1):
            value = bool(value)
        return value
    def convert_datetimefield_value(self, value, expression, connection, context):
        # Stored values are naive; re-attach the connection time zone.
        if value is not None:
            if settings.USE_TZ:
                value = timezone.make_aware(value, self.connection.timezone)
        return value
    def convert_uuidfield_value(self, value, expression, connection, context):
        if value is not None:
            value = uuid.UUID(value)
        return value
    def binary_placeholder_sql(self, value):
        # The _binary prefix avoids character-set conversion of the blob.
        return '_binary %s' if value is not None else '%s'
    def subtract_temporals(self, internal_type, lhs, rhs):
        """Return SQL computing lhs - rhs in microseconds, choosing a
        formulation based on field type and microsecond support."""
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        if self.connection.features.supports_microsecond_precision:
            if internal_type == 'TimeField':
                return (
                    "((TIME_TO_SEC(%(lhs)s) * POW(10, 6) + MICROSECOND(%(lhs)s)) -"
                    " (TIME_TO_SEC(%(rhs)s) * POW(10, 6) + MICROSECOND(%(rhs)s)))"
                ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, lhs_params * 2 + rhs_params * 2
            else:
                return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), rhs_params + lhs_params
        elif internal_type == 'TimeField':
            return (
                "(TIME_TO_SEC(%s) * POW(10, 6) - TIME_TO_SEC(%s) * POW(10, 6))"
            ) % (lhs_sql, rhs_sql), lhs_params + rhs_params
        else:
            return "(TIMESTAMPDIFF(SECOND, %s, %s) * POW(10, 6))" % (rhs_sql, lhs_sql), rhs_params + lhs_params
| |
from httplib import HTTPConnection, HTTPSConnection, HTTPMessage
from cStringIO import StringIO
import logging
import quopri
import zlib
from ..recording import ReplayRecordingManager
logger = logging.getLogger(__name__)
class ReplayError(Exception):
    """Base exception type raised by the httreplay library."""
    pass
class ReplayConnectionHelper:
    """
    Mixin that provides the ability to serialize and deserialize
    requests and responses into a recording.

    Designed to be mixed into an httplib connection class; subclasses
    set `_baseclass` to the real connection class and must also provide
    `_replay_settings` (this mixin never creates it).
    """
    def __init__(self):
        # When True, send() suppresses network writes because the
        # pending request was found in the recording.
        self.__fake_send = False
        # Lazily-loaded recording; see the __recording property.
        self.__recording_data = None
    # Some hacks to manage the presence (or not) of the connection's
    # socket. Requests 2.x likes to set settings on the socket, but
    # only checks whether the connection hasattr('sock') -- not whether
    # the sock itself is None (which is actually its default value,
    # and which httplib likes to see.) Yeesh.
    def __socket_del(self):
        """Remove the 'sock' attribute entirely when it is None."""
        if hasattr(self, 'sock') and (self.sock is None):
            del self.sock
    def __socket_none(self):
        """Ensure a 'sock' attribute exists (httplib expects None)."""
        if not hasattr(self, 'sock'):
            self.sock = None
    @property
    def __recording(self):
        """Provide the current recording, or create a new one if needed."""
        recording = self.__recording_data
        if not recording:
            recording = self.__recording_data = \
                ReplayRecordingManager.load(
                    self._replay_settings.replay_file_name)
        return recording
    # All httplib requests use the sequence putrequest(), putheader(),
    # then endheaders() -> _send_output() -> send()
    def putrequest(self, method, url, **kwargs):
        """Begin a request; records method/url for later matching."""
        self.__socket_none()
        # Store an incomplete request; this will be completed when
        # endheaders() is called.
        self.__request = dict(
            method=method,
            _url=url,
            _headers={},
        )
        return self._baseclass.putrequest(self, method, url, **kwargs)
    def putheader(self, header, *values):
        """Accumulate a request header into the pending request dict."""
        self.__socket_none()
        # Always called after putrequest() so the dict is prepped.
        val = self.__request['_headers'].get(header)
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
        # Repeated headers are folded into one comma-separated value.
        val = '' if val is None else val + ','
        val += '\r\n\t'.join(values)
        self.__request['_headers'][header] = val
        return self._baseclass.putheader(self, header, *values)
    def endheaders(self, message_body=None):
        """Finish building the request key and decide whether the request
        will be replayed (no network) or actually sent."""
        self.__socket_del()
        # If a key generator for the URL is provided, use it.
        # Otherwise, simply use the URL itself as the URL key.
        url = self.__request.pop('_url')
        if self._replay_settings.url_key:
            url_key = self._replay_settings.url_key(url)
        else:
            url_key = url
        # If a key generator for the headers is provided, use it.
        # Otherwise, simply use the headers directly.
        headers = self.__request.pop('_headers')
        if self._replay_settings.headers_key:
            headers_key = self._replay_settings.headers_key(headers)
        else:
            headers_key = headers
        # message_body can be a file; handle that before generating
        # body_key
        if message_body and callable(getattr(message_body, 'read', None)):
            body_content = message_body.read()
            message_body = StringIO(body_content)  # for continuity
        else:
            body_content = message_body
        # If a key generator for the body is provided, use it.
        # Otherwise, simply use the body itself as the body key.
        if body_content is not None and self._replay_settings.body_key:
            body_key = self._replay_settings.body_key(body_content)
        else:
            body_key = body_content
        self.__request.update(dict(
            # method already present
            url=url_key,
            headers=headers_key,
            host=self.host,
            port=self.port,
        ))
        # JSON encoder will try to decode bytes as UTF-8.
        # Do this in advance so we have a chance to handle binary data.
        if isinstance(body_key, str):
            try:
                self.__request['body'] = body_key.decode('utf8')
            except UnicodeDecodeError:
                # Binary body: store it base64-encoded instead.
                self.__request['body_base64'] = body_key.encode('base64')
        else:
            # Probably unicode or None.
            self.__request['body'] = body_key
        # endheaders() will eventually call send()
        logstr = '%(method)s %(host)s:%(port)s%(url)s' % self.__request
        if self.__request in self.__recording:
            logger.debug("ReplayConnectionHelper found %s", logstr)
            # Known request: suppress the real network send.
            self.__fake_send = True
        else:
            logger.debug("ReplayConnectionHelper trying %s", logstr)
        result = self._baseclass.endheaders(self, message_body)
        self.__fake_send = False
        return result
    def send(self, msg):
        """Forward to the real send() unless this request is replayed."""
        if not self.__fake_send:
            self.__socket_none()
            return self._baseclass.send(self, msg)
    def getresponse(self, buffering=False):
        """
        Provide a response from the current recording if possible.
        Otherwise, perform the network request. This function ALWAYS
        returns ReplayHTTPResponse() regardless so it's consistent between
        initial recording and later.
        """
        self.__socket_none()
        replay_response = self.__recording.get(self.__request)
        if replay_response:
            # Not calling the underlying getresponse(); do the same cleanup
            # that it would have done. However since the cleanup is on
            # class-specific members (self.__state and self.__response) this
            # is the easiest way.
            self.close()
        elif self._replay_settings.allow_network:
            logger.debug("ReplayConnectionHelper calling %s.getresponse()", self._baseclass.__name__)
            response = self._baseclass.getresponse(self)
            replay_response = ReplayHTTPResponse.make_replay_response(response)
            # Persist the new request/response pair immediately.
            self.__recording[self.__request] = replay_response
            ReplayRecordingManager.save(
                self.__recording,
                self._replay_settings.replay_file_name)
        else:
            # Network disabled and no recording: synthesize a 418 response.
            logger.debug("ReplayConnectionHelper 418 (allow_network=False)")
            replay_response = dict(
                status=dict(code=418, message="I'm a teapot"),
                headers={},
                body_text='Blocked by allow_network=False')
        return ReplayHTTPResponse(replay_response, method=self.__request['method'])
    def close(self):
        """Close the underlying connection, keeping the sock attribute."""
        self.__socket_none()
        self._baseclass.close(self)
class ReplayHTTPConnection(ReplayConnectionHelper, HTTPConnection):
    """Generic HTTPConnection with replay."""
    # MRO places the helper first, so its putrequest/putheader/endheaders/
    # send/getresponse/close wrap the httplib implementations.
    _baseclass = HTTPConnection
    def __init__(self, *args, **kwargs):
        # Initialize the real connection first, then the replay state.
        HTTPConnection.__init__(self, *args, **kwargs)
        ReplayConnectionHelper.__init__(self)
class ReplayHTTPSConnection(ReplayConnectionHelper, HTTPSConnection):
    """Generic HTTPSConnection with replay."""
    _baseclass = HTTPSConnection
    def __init__(self, *args, **kwargs):
        # httplib.HTTPConnection has been replaced by ReplayHTTPConnection,
        # so doing it this way rather than calling through
        # HTTPSConnection.__init__ allows us to use the original one.
        # key_file/cert_file are what HTTPSConnection.__init__ would have
        # consumed; pop them before handing off to HTTPConnection.
        self.key_file = kwargs.pop('key_file', None)
        self.cert_file = kwargs.pop('cert_file', None)
        HTTPConnection.__init__(self, *args, **kwargs)
        ReplayConnectionHelper.__init__(self)
class ReplayHTTPResponse(object):
    """
    A replay response object, with just enough functionality to make
    the various HTTP/URL libraries out there happy.
    """
    # Content types whose bodies are stored as (possibly quoted-printable)
    # text rather than base64 in the recording.
    __text_content_types = (
        'text/',
        'application/json',
    )
    def __init__(self, replay_response, method=None):
        self.reason = replay_response['status']['message']
        self.status = replay_response['status']['code']
        self.version = None
        if 'body_text' in replay_response:
            # JSON decoder returns unicode, not str, so this needs to be
            # encoded to properly reproduce content off the wire.
            self._content = replay_response['body_text'].encode('utf8')
        elif 'body_quoted_printable' in replay_response:
            # quopri.decodestring returns str, which is correct for content off
            # the wire.
            self._content = quopri.decodestring(replay_response['body_quoted_printable'])
        else:
            # .decode('base64') returns str, which is correct for content off
            # the wire.
            self._content = replay_response['body'].decode('base64')
        self.fp = StringIO(self._content)
        # Rebuild an httplib-style message object from the saved headers.
        msg_fp = StringIO('\r\n'.join('{}: {}'.format(h, v)
                                      for h, v in replay_response['headers'].iteritems()))
        self.msg = HTTPMessage(msg_fp)
        self.msg.fp = None  # httplib does this, okay?
        length = self.msg.getheader('content-length')
        self.length = int(length) if length else None
        # Save method to handle HEAD specially as httplib does
        self._method = method
    @classmethod
    def make_replay_response(cls, response):
        """
        Converts real response to replay_response dict which can be saved
        and/or used to initialize a ReplayHTTPResponse.
        """
        replay_response = {}
        body = response.read()  # undecoded byte string
        # Add body to replay_response. Try to use simple text, falling back to
        # quoted printable or base64 as required for binary responses.
        if response.getheader('content-type', '') \
                .startswith(cls.__text_content_types):
            if response.getheader('content-encoding') in ['gzip', 'deflate']:
                # http://stackoverflow.com/questions/2695152
                body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
                del response.msg['content-encoding']
                # decompression changes the length
                if 'content-length' in response.msg:
                    response.msg['content-length'] = str(len(body))
            try:
                # Store body directly as text if it will decode properly.
                body.decode('utf8')
                replay_response['body_text'] = body
            except UnicodeDecodeError:
                # Store body as quoted printable.
                # Remove unneccessary =\n pairs which make searching hard.
                # These exist for line-wrapping in email, which is entirely
                # pointless here.
                body_quoted_printable = quopri.encodestring(body)
                body_quoted_printable = body_quoted_printable.replace('=\n', '')
                replay_response['body_quoted_printable'] = body_quoted_printable
        else:
            # Non-text content: always store base64.
            replay_response['body'] = body.encode('base64')
        replay_response.update(dict(
            status=dict(code=response.status, message=response.reason),
            headers=dict(response.getheaders())))
        return replay_response
    def close(self):
        # Mirrors httplib: a closed response has no file pointer.
        self.fp = None
    def isclosed(self):
        return self.fp is None
    def read(self, amt=None):
        """
        The important parts of HTTPResponse.read()
        """
        if self.fp is None:
            return ''
        # HEAD responses have headers but never a body.
        if self._method == 'HEAD':
            self.close()
            return ''
        if self.length is not None:
            # NOTE(review): when amt is None this relies on Python 2's
            # min(None, int) == None ordering -- not portable to Python 3.
            amt = min(amt, self.length)
        # StringIO doesn't like read(None)
        s = self.fp.read() if amt is None else self.fp.read(amt)
        if not s:
            self.close()
        if self.length is not None:
            self.length -= len(s)
            if not self.length:
                self.close()
        return s
    def getheader(self, name, default=None):
        return self.msg.getheader(name, default)
    def getheaders(self):
        return self.msg.items()
| |
'''
Utility functions for working with Coda Plugin Skeleton
@author Justin Hileman <http://justinhileman.com>
'''
import re
from types import StringTypes
import AppKit
from Foundation import *
import cp_html_replace as html_replace
import cp_html_matcher as html_matcher
def is_line_ending(content, index, line_ending):
    '''True when the character(s) at index equal line_ending.'''
    stop = index + len(line_ending)
    if len(content) < stop:
        return False
    return content[index:stop] == line_ending
def end_is_line_ending(content, line_ending):
    '''True when the string's final characters are the given line ending.'''
    return content.endswith(line_ending)
def get_line_before(context, range = None):
    '''Return just the text of the line before the current (or supplied) range.'''
    text, _ = get_line_before_and_range(context, range)
    return text
def get_line_before_and_range(context, range = None):
    '''Get the full line immediately before the current (or supplied) range.

    Returns (line_text, line_range); (None, range) when the range is on
    the first line of the document. The returned line includes its
    trailing line ending.
    '''
    line_ending = get_line_ending(context)
    if range is None: range = get_range(context)
    content = context.string()
    # Find the line ending that terminates the previous line.
    end = content.rfind(line_ending, 0, range.location)
    if end == -1:
        # No earlier line ending: nothing before the current line.
        return None, range
    else:
        # Include the terminating line ending in the returned slice.
        end = end + len(line_ending)
        # Search backwards again for the start of that previous line.
        start = content.rfind(line_ending, 0, end - len(line_ending))
        if start == -1:
            start = 0
        else:
            start += len(line_ending)
    # Clamp to the document bounds.
    start = max(0, start)
    end = min(end, len(content))
    line_range = new_range(start, end - start)
    return get_selection(context, line_range), line_range
def get_line_after(context, range = None):
    '''Return just the text of the line after the current (or supplied) range.'''
    text, _ = get_line_after_and_range(context, range)
    return text
def get_line_after_and_range(context, range = None):
    '''Get the full line immediately after the current (or supplied) range.

    Returns (line_text, line_range); (None, range) when there is no
    following line. The returned line includes its trailing line ending
    when one exists.
    '''
    line_ending = get_line_ending(context)
    len_line_ending = len(line_ending)
    if range is None: range = get_range(context)
    content = context.string()
    start = range.location + range.length
    # Unless the range already ends exactly at a line ending, skip ahead
    # to the end of the current line first.
    if not is_line_ending(content, start - len_line_ending, line_ending):
        start = content.find(line_ending, start)
        if start == -1:
            # The range is on the last line: no line after it.
            return None, range
        else:
            start += len_line_ending
    # Extend to the end of the following line (or end of document).
    end = content.find(line_ending, start)
    if end == -1:
        end = len(content)
    else:
        end += len_line_ending
    # Clamp to the document bounds.
    start = max(0, start)
    end = min(end, len(content))
    line_range = new_range(start, end - start)
    return get_selection(context, line_range), line_range
def lines_and_range(context, range = None):
    '''Get the text and range of the full lines containing the current
    (or supplied) range, expanding both ends to line boundaries.'''
    line_ending = get_line_ending(context)
    len_line_ending = len(line_ending)
    if range is None: range = get_range(context)
    content = context.string()
    start, end = range.location, range.location + range.length
    # Extend the start back to the beginning of its line (unless the
    # range already starts just after a line ending).
    if not is_line_ending(content, start - len_line_ending, line_ending):
        start = content.rfind(line_ending, 0, start)
        if start == -1:
            start = 0
        else:
            start += len_line_ending
    # select to the end of the line (if it's not already selected)
    if not is_line_ending(content, end, line_ending):
        # edge case: cursor is at start of line and more than one line selected:
        if not is_line_ending(content, end - len_line_ending, line_ending) or len(content[start:end].split(line_ending)) <= 1:
            end = content.find(line_ending, end)
            if end == -1:
                end = len(content)
            else:
                end += len_line_ending
    # edge case: empty line, not selected
    elif is_line_ending(content, end - len_line_ending, line_ending):
        if len(content[start:end].split(line_ending)) <= 1:
            end = content.find(line_ending, end)
            if end == -1:
                end = len(content)
            else:
                end += len_line_ending
    else:
        # Range ends exactly at a line ending: include it.
        end += len_line_ending
    # Clamp to the document bounds.
    start = max(0, start)
    end = min(end, len(content))
    line_range = new_range(start, end - start)
    return get_selection(context, line_range), line_range
def balance_line_endings(first, second, line_ending):
    '''Normalize a pair of lines so the first ends with a line ending
    and the second does not (e.g. after swapping two lines).'''
    n = len(line_ending)
    if first[-n:] != line_ending:
        first = first + line_ending
    if second[-n:] == line_ending:
        second = second[:-n]
    return first, second
def insert_text_with_insertion_point(context, text, range):
    '''
    Extracts insertion point tokens, inserts the text, and selects the
    insertion point range.

    The insertion point is delimited by `$$IP$$`...`$$` tokens inside
    `text` (see extract_insertion_point_range). If no tokens are found
    the text is inserted as-is without changing the selection.
    '''
    text, ip_range = extract_insertion_point_range(text)
    if ip_range:
        # Shift the token-relative range to an absolute document range
        select_range = new_range(range.location + ip_range.location, ip_range.length)
        insert_text_and_select(context, text, range, select_range)
    else:
        insert_text(context, text, range)
def extract_insertion_point_range(text):
    '''
    Extract a range for the insertion point delimited by `$$IP$$` and `$$`.

    Returns (stripped_text, range) where range is None when no start
    token is present, zero-length when only the start token is present,
    and spans the delimited region otherwise.
    '''
    start_token = '$$IP$$'
    end_token = '$$'
    start = text.find(start_token)
    if start < 0:
        # No insertion point marker at all
        return text, None
    # Strip the start token before looking for the closing token
    text = text.replace(start_token, '', 1)
    end = text.find(end_token, start)
    if end < 0:
        # Unclosed marker: collapse to a zero-length insertion point
        return text, new_range(start, 0)
    text = text.replace(end_token, '', 1)
    return text, new_range(start, end - start)
def words_and_range(context, range = None):
    '''
    Get text and range for the current word(s).

    Expands the current (or supplied) selection outward, within the
    current line, until it is bounded by non-word characters on both
    sides; returns a (text, range) tuple.
    '''
    # word characters, for our purposes. include < and > for html, $ for php variables.
    word_chars = 'a-zA-Z0-9_\\?|`<>\$'
    line_ending = get_line_ending(context)
    text = context.string()
    line_text, line_range = lines_and_range(context)
    if range is None:
        selection, selection_range = selection_and_range(context)
    else:
        selection_range = range
        selection = get_selection(context, selection_range)
    # move the start (out)
    prefix_start = line_range.location
    prefix_end = selection_range.location
    # Only extend left while the selection does not begin mid non-word run
    if prefix_end > prefix_start and not re.match('[^%s]' % word_chars, selection):
        result = rfind_not_chars(text[prefix_start:prefix_end], word_chars)
        if result != -1:
            start = prefix_start + result
        else:
            # Whole prefix is word characters: extend to line start
            start = prefix_start
        selection_range = new_range(start, selection_range.length + prefix_end - start)
        selection = get_selection(context, selection_range)
    # move the end (out), symmetric to the start expansion above
    suffix_start = selection_range.location + selection_range.length
    suffix_end = line_range.location + line_range.length
    if suffix_start < suffix_end and not re.search('[^%s]$' % word_chars, selection):
        result = find_not_chars(text[suffix_start:suffix_end], word_chars)
        if result != -1:
            end = result + suffix_start
        else:
            end = suffix_end
        selection_range = new_range(selection_range.location, end - selection_range.location)
        selection = get_selection(context, selection_range)
    return selection, selection_range
def beep():
    '''System beep! Plays the macOS alert sound via AppKit.'''
    AppKit.NSBeep()
def find_chars(text, chars):
    '''
    Find the first index of any character from chars in text.
    chars is the guts of a regex character class; returns -1 on no match.
    '''
    match = re.search('[%s]' % chars, text)
    return match.start() if match is not None else -1
def rfind_chars(text, chars):
    '''
    Find the index just past the last character from chars in text.
    chars is the guts of a regex character class; returns -1 on no match.
    '''
    # Anchor on the final chars-character followed only by non-chars
    match = re.search('([%s])[^%s]*$' % (chars, chars), text)
    return -1 if match is None else match.end(1)
def find_not_chars(text, chars):
    '''
    Find the first index of any character NOT in chars in text.
    chars is the guts of a regex character class; returns -1 on no match.
    '''
    match = re.search('[^%s]' % chars, text)
    return match.start() if match is not None else -1
def rfind_not_chars(text, chars):
    '''
    Find the index just past the last character NOT in chars in text.
    chars is the guts of a regex character class; returns -1 on no match.
    '''
    # Anchor on the final non-chars character followed only by chars
    match = re.search('([^%s])[%s]*$' % (chars, chars), text)
    return -1 if match is None else match.end(1)
# ===============================================================
# From TEA for Coda
# ===============================================================
# ===============================================================
# Interact with the user and output information
# ===============================================================
def say(context, title, message,
        main_button=None, alt_button=None, other_button=None):
    '''
    Displays a dialog with a message for the user.

    Runs as a sheet attached to the context's window when one exists,
    otherwise as an application-modal alert. Button titles of None use
    the AppKit defaults.
    '''
    alert = NSAlert.alertWithMessageText_defaultButton_alternateButton_otherButton_informativeTextWithFormat_(
        title,
        main_button,
        alt_button,
        other_button,
        message
    )
    if context.window() is not None:
        # Sheet presentation; no delegate/selector, so the result is
        # not reported back to us
        return alert.beginSheetModalForWindow_modalDelegate_didEndSelector_contextInfo_(
            context.window(), None, None, None
        )
    else:
        return alert.runModal()
def log(message):
    '''
    Convenience function for logging messages to console
    Please make sure they are strings before you try to log them; wrap
    anything you aren't sure of in str()
    '''
    NSLog(str(message))
# ===============================================================
# Coda-specific options helpers
# ===============================================================
def get_option(options, option, default=None):
    '''
    Look up `option` in an NSDictionary-like `options` object, falling
    back to `default` when the key is absent (and a default was given).
    '''
    option = options.objectForKey_(option)
    if default is not None and option is None:
        option = default
    return option
def get_context(controller, sender=None):
    '''Return the text view (editing context) currently focused in `controller`.'''
    return controller.focusedTextView_(sender)
# ===============================================================
# Preference lookup shortcuts
# ===============================================================
def get_line_ending(context):
    '''Shortcut function to get the line-endings for the context'''
    return context.lineEnding()
def get_indentation_string(context):
    '''
    Shortcut to retrieve the indentation string: a literal tab when the
    document uses tabs, otherwise tabWidth() spaces.
    '''
    if context.usesTabs():
        tabString = '\t'
    else:
        tabString = ' ' * context.tabWidth()
    return tabString
# ===============================================================
# Text manipulations and helper functions
# ===============================================================
def parse_word(selection):
    '''
    Extract the first word from a string, or None when the string does
    not begin with a word character:
        parse_word('p class="stuff"') => 'p'
    '''
    match = re.match(r'(([a-zA-Z0-9_-]+)\s*.*)$', selection)
    return None if match is None else match.group(2)
def string_to_tag(string):
    '''
    Parses a string into a tag with id and class attributes
    For example, div#stuff.good.things translates into
    `div id="stuff" class="good things"`
    '''
    # Nothing to expand unless an id/class marker appears past the
    # first character (matches the original guard's semantics)
    if string.find('#') <= 0 and string.find('.') <= 0:
        return string
    id_match = re.search(r'#([a-zA-Z0-9_-]+)', string)
    classes = ' '.join(re.findall(r'\.([a-zA-Z0-9_-]+)', string))
    tag = parse_word(string)
    if id_match:
        tag += ' id="%s"' % id_match.group(1)
    if classes:
        tag += ' class="%s"' % classes
    return tag
def is_selfclosing(tag):
    '''Checks a tag and returns True if it's a self-closing XHTML tag'''
    # For now, we're just checking off a list
    selfclosing_tags = ('img', 'input', 'br', 'hr', 'link', 'base', 'meta')
    # Reduce e.g. 'img src="x"' to the bare tag name first
    if not tag.isalnum():
        tag = parse_word(tag)
        if tag is None:
            return False
    return tag in selfclosing_tags
def encode_ampersands(text, enc='&amp;'):
    '''
    Encodes bare ampersands as `enc` (default `&amp;`), leaving existing
    named, decimal, and hex entities untouched.

    The previous default of '&' replaced an ampersand with itself,
    which made the default call a no-op.
    '''
    return re.sub('&(?!([a-zA-Z0-9]+|#[0-9]+|#x[0-9a-fA-F]+);)', enc, text)
def named_entities(text):
    '''
    Converts Unicode characters into named HTML entities.

    NOTE(review): relies on an 'html_replace' codec error handler being
    registered elsewhere (not visible in this file) — confirm it is
    installed before this is called.
    '''
    text = text.encode('ascii', 'html_replace')
    return encode_ampersands(text)
def numeric_entities(text, ampersands=None):
    '''
    Converts Unicode characters into numeric HTML entities.

    `ampersands` controls how bare ampersands are then encoded:
    'numeric' => &#38;, 'named' => &amp;, anything else leaves them as-is.
    '''
    text = text.encode('ascii', 'xmlcharrefreplace')
    if ampersands == 'numeric':
        # Use the numeric reference for '&' itself; passing plain '&'
        # (as before) replaced an ampersand with itself — a no-op.
        return encode_ampersands(text, '&#38;')
    elif ampersands == 'named':
        return encode_ampersands(text)
    else:
        return text
def entities_to_hex(text, wrap):
    '''
    Converts HTML character references into hexadecimal; replaces $HEX
    in `wrap` with the zero-padded (>= 4 digit) hex code.

    Handles both decimal (&#169;) and hexadecimal (&#xA9;) references.
    Previously hex references were parsed as decimal — so &#xA9; raised
    ValueError inside re.sub and &#x26; produced the wrong code point.
    Anything that cannot be parsed as a number is left untouched.
    '''
    def wrap_hex(match):
        try:
            # `#x` marks a hexadecimal character reference
            base = 16 if match.group(1) == '#x' else 10
            codepoint = int(match.group(2), base)
        except ValueError:
            # e.g. a hex-looking token without the #x prefix; leave as-is
            return match.group(0)
        return wrap.replace('$HEX', ('%X' % codepoint).zfill(4))
    return re.sub(r'&(#x?)?([0-9]+|[0-9a-fA-F]+);', wrap_hex, text)
def trim(context, text, lines=True, sides='both', respect_indent=False,
        preserve_linebreaks=True, discard_empty=False):
    '''
    Trims whitespace from the text
    If lines=True, will trim each line in the text.
    sides can be both, start, or end and dictates where trimming occurs.
    If respect_indent=True, indent characters at the start of lines will be
    left alone (specific character determined by preferences)
    If discard_empty=True, whitespace on empty lines will be discarded
    regardless of indentation status
    '''
    # Characters treated as indentation (empty string disables the feature)
    indent = get_indentation_string(context) if respect_indent else ''
    def _trim_chunk(chunk):
        '''Trim a single line (or the whole text when lines=False).'''
        where = sides.lower()
        # Empty lines collapse straight to their linebreak when requested
        if discard_empty:
            empty = re.match(r'\s*?([\n\r]+)$', chunk)
            if empty:
                return empty.group(1)
        # Capture leading indentation to restore after stripping
        prefix = ''
        if indent != '' and where in ('both', 'start'):
            lead = re.match('(' + indent + ')+', chunk)
            if lead:
                prefix = lead.group(0)
        # Capture the trailing linebreak to restore after stripping
        suffix = ''
        if preserve_linebreaks:
            trail = re.search(r'[\n\r]+$', chunk)
            if trail:
                suffix = trail.group(0)
        if where == 'start':
            chunk = chunk.lstrip()
        elif where == 'end':
            chunk = chunk.rstrip()
        else:
            chunk = chunk.strip()
        return prefix + chunk + suffix
    if lines:
        return ''.join(_trim_chunk(piece) for piece in text.splitlines(True))
    return _trim_chunk(text)
def unix_line_endings(text):
    '''Converts all line endings (CRLF and bare CR) to Unix (LF)'''
    # Order matters: CRLF first so a lone-\r pass can't split it
    return text.replace('\r\n', '\n').replace('\r', '\n')
def clean_line_endings(context, text, line_ending=None):
    '''
    Converts all line endings to the default line ending of the file,
    or if line_ending is specified uses that
    '''
    target = get_line_ending(context) if line_ending is None else line_ending
    # Normalize everything to \n first, then expand to the target ending
    return unix_line_endings(text).replace('\n', target)
# ===============================================================
# Working with ranges and selected text
# ===============================================================
def new_range(location, length):
    '''Convenience function for creating an NSRange'''
    return NSMakeRange(location, length)
def get_selection(context, range):
    '''Convenience function; returns selected text within a given range'''
    return context.string().substringWithRange_(range)
def set_selected_range(context, range):
    '''Sets the selection to the single range passed as an argument'''
    context.setSelectedRange_(range)
def get_line(context):
    '''Return (text, range) for the line containing the cursor.'''
    return context.currentLine(), context.rangeOfCurrentLine()
def get_range(context):
    '''Return the current selection's range.'''
    return context.selectedRange()
def selection_and_range(context, with_errors=False):
    '''
    If there's a single selection, returns the selected text,
    otherwise throws optional descriptive errors
    Returns a tuple with the selected text first and its range second
    '''
    range = context.selectedRange()
    # Use == rather than `is` for the int comparison: identity checks
    # against integer literals rely on CPython's small-int caching and
    # raise a SyntaxWarning on modern Pythons.
    if range.length == 0:
        if with_errors:
            say(
                context, "Error: selection required",
                "You must select some text in order to use this action."
            )
        return '', range
    return get_selection(context, range), range
def get_character(context, range):
    '''
    Returns the character immediately preceding the cursor (and its
    range), or (None, range) when the cursor is at the document start.
    '''
    if range.location > 0:
        range = new_range(range.location - 1, 1)
        return get_selection(context, range), range
    else:
        return None, range
def get_word(context, range, alpha_numeric=True, extra_characters='_-',
             bidirectional=True):
    '''
    Selects and returns the current word and its range from the passed range
    By default it defines a word as a contiguous string of alphanumeric
    characters plus extra_characters. Setting alpha_numeric to False will
    define a word as a contiguous string of alphabetic characters plus
    extra_characters
    If bidirectional is False, then it will only look behind the cursor
    '''
    # Helper regex for determining if line ends with a tag
    # Includes checks for ASP/PHP/JSP/ColdFusion closing delimiters
    re_tag = re.compile(r'(<\/?[\w:\-]+[^>]*|\s*(\?|%|-{2,3}))>$')
    def test_word():
        # Mini-function to cut down on code bloat
        # NOTE(review): in the alpha-only branch, `char.isalpha()` tests
        # the whole selection while the loop variable is `c`; harmless
        # here because `char` is always a single character — confirm.
        if alpha_numeric:
            return all(c.isalnum() or c in extra_characters for c in char)
        else:
            return all(char.isalpha() or c in extra_characters for c in char)
    def ends_with_tag(cur_index):
        # Mini-function to check if line to index ends with a tag
        linestart = context.rangeOfCurrentLine().location
        text = get_selection(
            context, new_range(linestart, cur_index - linestart + 1)
        )
        return re_tag.search(text) != None
    # Set up basic variables
    index = range.location
    word = ''
    maxlength = context.string().length()
    if bidirectional:
        # Make sure the cursor isn't at the end of the document
        if index != maxlength:
            # Check if cursor is mid-word
            char = get_selection(context, new_range(index, 1))
            if test_word():
                inword = True
                # Parse forward until we hit the end of word or document
                while inword:
                    char = get_selection(context, new_range(index, 1))
                    if test_word():
                        word += char
                    else:
                        inword = False
                    index += 1
                    if index == maxlength:
                        inword = False
            else:
                # lastindex logic assumes we've been incrementing as we go,
                # so bump it up one to compensate
                index += 1
        lastindex = index - 1 if index < maxlength else index
    else:
        # Only parsing backward, so final index is cursor
        lastindex = range.location
    # Reset index to one less than the cursor
    index = range.location - 1
    # Only walk backwards if we aren't at the beginning
    if index >= 0:
        # Parse backward to get the word ahead of the cursor
        inword = True
        while inword:
            char = get_selection(context, new_range(index, 1))
            # Stop at a '>' that terminates an HTML/template tag so the
            # tag itself is not swallowed into the word
            if test_word() and not (char == '>' and ends_with_tag(index)):
                word = char + word
                index -= 1
            else:
                inword = False
            if index < 0:
                inword = False
    # Since index is left-aligned and we've overcompensated,
    # need to increment +1
    firstindex = index + 1
    # Switch last index to length for use in range
    lastindex = lastindex - firstindex
    range = new_range(firstindex, lastindex)
    return word, range
def get_word_or_selection(context, range, alpha_numeric=True,
                          extra_characters='_-', bidirectional=True):
    '''
    Selects and returns the current word and its range from the passed range,
    or if there's already a selection returns the contents and its range
    See get_word() for an explanation of the extra arguments
    '''
    # Zero-length range == bare cursor: fall back to word detection
    if range.length == 0:
        return get_word(context, range, alpha_numeric, extra_characters, bidirectional)
    else:
        return get_selection(context, range), range
def indent_snippet(context, snippet, range):
    '''
    Sets a snippet's indentation level to match that of the line starting
    at the location of range

    Single-line snippets are returned untouched; for multi-line snippets
    the current line's leading whitespace is prepended to every line
    after the first (and to a trailing newline, when present).
    '''
    # Are there newlines?
    if re.search(r'[\n\r]', snippet) is not None:
        # Check if line is indented
        line = context.rangeOfCurrentLine()
        # Check if line is actually indented
        if line.location != range.location:
            line = get_selection(context, line)
            match = re.match(r'([ \t]+)', line)
            # Only indent if the line starts with whitespace
            if match is not None:
                current_indent = match.group(1)
                indent_string = get_indentation_string(context)
                # Convert tabs to indent_string and indent each line
                if indent_string != '\t':
                    snippet = snippet.replace('\t', indent_string)
                lines = snippet.splitlines(True)
                # Convert to iterator so we can avoid processing item 0
                lines = iter(lines)
                # next(lines) instead of the Python 2-only lines.next()
                # — works on Python 2.6+ and Python 3
                snippet = next(lines)
                for line in lines:
                    snippet += current_indent + line
                if re.search(r'[\n\r]$', snippet) is not None:
                    # Ends with a newline, add the indent
                    snippet += current_indent
    return snippet
# ===============================================================
# Check document syntax methods
# ===============================================================
def get_zen_doctype(context, default='html'):
    '''
    Tests the document to see if it is CSS or XSL; for use with zen
    coding actions to determine type of snippets to use

    Checks the file extension first, then falls back to looking for an
    enclosing <style> tag around the cursor.
    '''
    doc_type = default
    css_exts = ['css', 'less']
    xsl_exts = ['xsl', 'xslt']
    path = context.path()
    if path is not None:
        pos = path.rfind('.')
        if pos != -1:
            pos += 1
            ext = path[pos:]
            if ext in css_exts:
                doc_type = 'css'
            elif ext in xsl_exts:
                doc_type = 'xsl'
    # No luck with the extension; check for inline style tags
    if doc_type == 'html':
        range = get_range(context)
        cursor = range.location + range.length
        content = context.string()
        # NOTE(review): html_matcher is a module-level dependency not
        # imported in this view; match() also populates last_match
        start, end = html_matcher.match(content, cursor)
        tag = html_matcher.last_match['opening_tag']
        if tag is not None:
            tag = tag.name
        if tag == 'style':
            doc_type = 'css'
    return doc_type
# ===============================================================
# Insertion methods
# ===============================================================
def insert_text(context, text, range):
    '''
    Immediately replaces the text at range with passed in text,
    wrapped in a single undo group.
    '''
    context.beginUndoGrouping()
    context.replaceCharactersInRange_withString_(range, text)
    context.endUndoGrouping()
def insert_text_and_select(context, text, range, select_range):
    '''
    Immediately inserts the text and selects the given range,
    as one undoable operation.
    '''
    context.beginUndoGrouping()
    context.replaceCharactersInRange_withString_(range, text)
    context.setSelectedRange_(select_range)
    context.endUndoGrouping()
| |
# Copyright (c) 2019 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib import constants
from neutron_lib import context
from oslo_config import cfg
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import segments_db
from neutron.extensions import network_segment_range as ext_range
from neutron.services.network_segment_range import plugin as plugin_range
from neutron.tests.unit.db import test_db_base_plugin_v2
SERVICE_PLUGIN_KLASS = ('neutron.services.network_segment_range.plugin.'
'NetworkSegmentRangePlugin')
TEST_PLUGIN_KLASS = (
'neutron.tests.unit.extensions.test_network_segment_range.'
'NetworkSegmentRangeTestPlugin')
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class NetworkSegmentRangeExtensionManager(object):
    # Minimal extension manager exposing only the network-segment-range
    # API resources to the test web framework.
    def get_resources(self):
        # Resources come straight from the extension definition
        return ext_range.Network_segment_range.get_resources()
    def get_actions(self):
        # No extra actions are provided by this extension
        return []
    def get_request_extensions(self):
        # No request extensions are provided either
        return []
class NetworkSegmentRangeTestBase(test_db_base_plugin_v2.
                                  NeutronDbPluginV2TestCase):
    # Shared helpers for driving the network-segment-range REST API in
    # the unit-test web environment.
    def _create_network_segment_range(self, fmt, expected_res_status=None,
                                      **kwargs):
        # Build the request body; all values are stringified as the API
        # layer would receive them over the wire
        network_segment_range = {'network_segment_range': {}}
        for k, v in kwargs.items():
            network_segment_range['network_segment_range'][k] = str(v)
        network_segment_range_req = self.new_create_request(
            'network-segment-ranges', network_segment_range, fmt)
        network_segment_range_res = network_segment_range_req.get_response(
            self.ext_api)
        if expected_res_status:
            self.assertEqual(expected_res_status,
                             network_segment_range_res.status_int)
        return network_segment_range_res
    def network_segment_range(self, **kwargs):
        # Create and deserialize, raising for any 4xx/5xx response
        res = self._create_network_segment_range(self.fmt, **kwargs)
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(self.fmt, res)
    def _test_create_network_segment_range(self, expected=None, **kwargs):
        # Validate the created resource against the request, and against
        # an explicit expected dict when one is supplied
        network_segment_range = self.network_segment_range(**kwargs)
        self._validate_resource(network_segment_range, kwargs,
                                'network_segment_range')
        if expected:
            self._compare_resource(network_segment_range, expected,
                                   'network_segment_range')
        return network_segment_range
    def _test_update_network_segment_range(self, range_id,
                                           data, expected=None):
        # Returns the deserialized resource when `expected` is given,
        # otherwise the raw response object
        update_req = self.new_update_request(
            'network-segment-ranges', data, range_id)
        update_res = update_req.get_response(self.ext_api)
        if expected:
            network_segment_range = self.deserialize(self.fmt, update_res)
            self._compare_resource(network_segment_range, expected,
                                   'network_segment_range')
            return network_segment_range
        return update_res
class NetworkSegmentRangeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                                    plugin_range.NetworkSegmentRangePlugin):
    """Test plugin to mixin the network segment range extension."""
    # Name-mangled capability flags read by the API/test framework
    __native_pagination_support = True
    __native_sorting_support = True
    __filter_validation_support = True
    supported_extension_aliases = ["provider", "network-segment-range"]
    def __init__(self):
        super(NetworkSegmentRangeTestPlugin, self).__init__()
        # The ML2 type manager is not exercised here; mock it out
        self.type_manager = mock.Mock()
class TestNetworkSegmentRange(NetworkSegmentRangeTestBase):
    """API-level CRUD tests for network segment ranges."""
    def setUp(self, plugin=None):
        if not plugin:
            plugin = TEST_PLUGIN_KLASS
        service_plugins = {'network_segment_range_plugin_name':
                           SERVICE_PLUGIN_KLASS}
        cfg.CONF.set_override('service_plugins', [SERVICE_PLUGIN_KLASS])
        ext_mgr = NetworkSegmentRangeExtensionManager()
        super(TestNetworkSegmentRange, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
    def _test_create_network_segment_range(self, expected=None, **kwargs):
        # Fill in a valid VLAN range by default so each test only needs
        # to state the fields it cares about
        for d in (kwargs, expected):
            if d is None:
                continue
            d.setdefault('name', '')
            d.setdefault('shared', True)
            d.setdefault('project_id', None)
            d.setdefault('network_type', constants.TYPE_VLAN)
            d.setdefault('physical_network', 'phys_net')
            d.setdefault('minimum', 200)
            d.setdefault('maximum', 300)
        return (super(TestNetworkSegmentRange, self).
                _test_create_network_segment_range(expected, **kwargs))
    def test_create_network_segment_range_empty_name(self):
        expected_range = {'name': '',
                          'shared': True,
                          'project_id': None,
                          'network_type': constants.TYPE_VLAN,
                          'physical_network': 'phys_net',
                          'minimum': 200,
                          'maximum': 300}
        self._test_create_network_segment_range(expected=expected_range)
    def test_create_network_segment_range_with_name(self):
        expected_range = {'name': 'foo-range-name',
                          'shared': True,
                          'project_id': None,
                          'network_type': constants.TYPE_VLAN,
                          'physical_network': 'phys_net',
                          'minimum': 200,
                          'maximum': 300}
        self._test_create_network_segment_range(
            name='foo-range-name',
            expected=expected_range)
    def test_create_network_segment_range_unsupported_network_type(self):
        exc = self.assertRaises(webob.exc.HTTPClientError,
                                self._test_create_network_segment_range,
                                network_type='foo-network-type')
        self.assertEqual(webob.exc.HTTPClientError.code, exc.code)
        self.assertIn('The server could not comply with the request',
                      exc.explanation)
    def test_create_network_segment_range_no_physical_network(self):
        expected_range = {'shared': True,
                          'project_id': None,
                          'network_type': constants.TYPE_VXLAN,
                          'physical_network': None}
        self._test_create_network_segment_range(
            network_type=constants.TYPE_VXLAN,
            physical_network=None,
            expected=expected_range)
    def test_create_network_segment_range_tenant_specific(self):
        expected_range = {'shared': False,
                          'project_id': test_db_base_plugin_v2.TEST_TENANT_ID,
                          'network_type': constants.TYPE_VLAN,
                          'physical_network': 'phys_net',
                          'minimum': 200,
                          'maximum': 300}
        self._test_create_network_segment_range(
            shared=False,
            project_id=test_db_base_plugin_v2.TEST_TENANT_ID,
            network_type=constants.TYPE_VLAN,
            physical_network='phys_net',
            expected=expected_range)
    def test_create_network_segment_ranges_in_certain_order(self):
        # Creation order must be preserved by the plugin's listing
        ctx = context.get_admin_context()
        range1 = self._test_create_network_segment_range(
            name='foo-range1', physical_network='phys_net1')
        range2 = self._test_create_network_segment_range(
            name='foo-range2', physical_network='phys_net2')
        range3 = self._test_create_network_segment_range(
            name='foo-range3', physical_network='phys_net3')
        network_segment_ranges = (
            NetworkSegmentRangeTestPlugin.get_network_segment_ranges(
                NetworkSegmentRangeTestPlugin(), ctx))
        self.assertEqual(range1['network_segment_range']['id'],
                         network_segment_ranges[0]['id'])
        self.assertEqual(range2['network_segment_range']['id'],
                         network_segment_ranges[1]['id'])
        self.assertEqual(range3['network_segment_range']['id'],
                         network_segment_ranges[2]['id'])
    def test_create_network_segment_range_failed_with_vlan_minimum_id(self):
        # 0 is below the valid VLAN range (1-4094)
        exc = self.assertRaises(webob.exc.HTTPClientError,
                                self._test_create_network_segment_range,
                                minimum=0)
        self.assertEqual(webob.exc.HTTPClientError.code, exc.code)
        self.assertIn('The server could not comply with the request',
                      exc.explanation)
    def test_create_network_segment_range_failed_with_vlan_maximum_id(self):
        # 4095 is above the valid VLAN range (1-4094). Pass `maximum`
        # (the name under test; it was previously `minimum`) and expect
        # a client error (400), mirroring the minimum-ID test above.
        exc = self.assertRaises(webob.exc.HTTPClientError,
                                self._test_create_network_segment_range,
                                maximum=4095)
        self.assertEqual(webob.exc.HTTPClientError.code, exc.code)
        self.assertIn('The server could not comply with the request',
                      exc.explanation)
    def test_create_network_segment_range_failed_with_tunnel_minimum_id(self):
        tunnel_type = [constants.TYPE_VXLAN,
                       constants.TYPE_GRE,
                       constants.TYPE_GENEVE]
        for network_type in tunnel_type:
            exc = self.assertRaises(webob.exc.HTTPClientError,
                                    self._test_create_network_segment_range,
                                    network_type=network_type,
                                    physical_network=None,
                                    minimum=0)
            self.assertEqual(webob.exc.HTTPClientError.code, exc.code)
            self.assertIn('The server could not comply with the request',
                          exc.explanation)
    def test_create_network_segment_range_failed_with_tunnel_maximum_id(self):
        # First out-of-range ID for each tunnel type (max valid ID + 1)
        expected_res = [(constants.TYPE_VXLAN, 2 ** 24),
                        (constants.TYPE_GRE, 2 ** 32),
                        (constants.TYPE_GENEVE, 2 ** 24)]
        for network_type, max_id in expected_res:
            exc = self.assertRaises(webob.exc.HTTPClientError,
                                    self._test_create_network_segment_range,
                                    network_type=network_type,
                                    physical_network=None,
                                    maximum=max_id)
            # All out-of-range maxima are validation failures, i.e.
            # client errors, exactly like the minimum-ID cases above
            self.assertEqual(webob.exc.HTTPClientError.code, exc.code)
            self.assertIn('The server could not comply with the request',
                          exc.explanation)
    def test_update_network_segment_range_set_name(self):
        network_segment_range = self._test_create_network_segment_range()
        with mock.patch.object(segments_db, 'min_max_actual_segments_in_range',
                               return_value=(None, None)):
            result = self._update(
                'network-segment-ranges',
                network_segment_range['network_segment_range']['id'],
                {'network_segment_range': {'name': 'foo-name'}},
                expected_code=webob.exc.HTTPOk.code)
            self.assertEqual('foo-name',
                             result['network_segment_range']['name'])
    def test_update_network_segment_range_set_name_to_empty(self):
        network_segment_range = self._test_create_network_segment_range(
            name='foo-range-name')
        with mock.patch.object(segments_db, 'min_max_actual_segments_in_range',
                               return_value=(None, None)):
            result = self._update(
                'network-segment-ranges',
                network_segment_range['network_segment_range']['id'],
                {'network_segment_range': {'name': ''}},
                expected_code=webob.exc.HTTPOk.code)
            self.assertEqual('', result['network_segment_range']['name'])
    def test_update_network_segment_range_min_max(self):
        network_segment_range = self._test_create_network_segment_range()
        with mock.patch.object(segments_db, 'min_max_actual_segments_in_range',
                               return_value=(None, None)):
            result = self._update(
                'network-segment-ranges',
                network_segment_range['network_segment_range']['id'],
                {'network_segment_range': {'minimum': 1200, 'maximum': 1300}},
                expected_code=webob.exc.HTTPOk.code)
            self.assertEqual(1200, result['network_segment_range']['minimum'])
            self.assertEqual(1300, result['network_segment_range']['maximum'])
    def test_get_network_segment_range(self):
        network_segment_range = self._test_create_network_segment_range()
        req = self.new_show_request(
            'network-segment-ranges',
            network_segment_range['network_segment_range']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual(
            network_segment_range['network_segment_range']['id'],
            res['network_segment_range']['id'])
    def test_list_network_segment_ranges(self):
        self._test_create_network_segment_range(name='foo-range1')
        self._test_create_network_segment_range(
            name='foo-range2', minimum=400, maximum=500)
        res = self._list('network-segment-ranges')
        self.assertEqual(2, len(res['network_segment_ranges']))
    def test_list_network_segment_ranges_with_sort(self):
        range1 = self._test_create_network_segment_range(
            name='foo-range1', physical_network='phys_net1')
        range2 = self._test_create_network_segment_range(
            name='foo-range2', physical_network='phys_net2')
        self._test_list_with_sort('network-segment-range',
                                  (range2, range1),
                                  [('name', 'desc')])
    def test_list_network_segment_ranges_with_pagination(self):
        range1 = self._test_create_network_segment_range(
            name='foo-range1', physical_network='phys_net1')
        range2 = self._test_create_network_segment_range(
            name='foo-range2', physical_network='phys_net2')
        range3 = self._test_create_network_segment_range(
            name='foo-range3', physical_network='phys_net3')
        self._test_list_with_pagination(
            'network-segment-range',
            (range1, range2, range3),
            ('name', 'asc'), 2, 2)
    def test_list_network_segment_ranges_with_pagination_reverse(self):
        range1 = self._test_create_network_segment_range(
            name='foo-range1', physical_network='phys_net1')
        range2 = self._test_create_network_segment_range(
            name='foo-range2', physical_network='phys_net2')
        range3 = self._test_create_network_segment_range(
            name='foo-range3', physical_network='phys_net3')
        self._test_list_with_pagination_reverse(
            'network-segment-range',
            (range1, range2, range3),
            ('name', 'asc'), 2, 2)
    def test_delete_network_segment_range(self):
        network_segment_range = self._test_create_network_segment_range()
        with mock.patch.object(segments_db, 'network_segments_exist_in_range',
                               return_value=False):
            self._delete('network-segment-ranges',
                         network_segment_range['network_segment_range']['id'])
            self._show('network-segment-ranges',
                       network_segment_range['network_segment_range']['id'],
                       expected_code=webob.exc.HTTPNotFound.code)
""" Python test discovery, setup and run of test functions. """
import collections
import enum
import fnmatch
import inspect
import os
import sys
import warnings
from functools import partial
from textwrap import dedent
import py
import _pytest
from _pytest import deprecated
from _pytest import fixtures
from _pytest import nodes
from _pytest._code import filter_traceback
from _pytest.compat import ascii_escaped
from _pytest.compat import get_default_arg_names
from _pytest.compat import get_real_func
from _pytest.compat import getfslineno
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import NOTSET
from _pytest.compat import REGEX_TYPE
from _pytest.compat import safe_getattr
from _pytest.compat import safe_isclass
from _pytest.compat import STRING_TYPES
from _pytest.config import hookimpl
from _pytest.main import FSHookProxy
from _pytest.mark import MARK_GEN
from _pytest.mark.structures import get_unpacked_marks
from _pytest.mark.structures import normalize_mark_list
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.pathlib import parts
from _pytest.warning_types import PytestCollectionWarning
from _pytest.warning_types import PytestUnhandledCoroutineWarning
def pyobj_property(name):
    """Build a read-only property that returns the nearest ancestor collector
    of the given pytest node type (``Module``, ``Class``, ...), or None."""
    def get(self):
        # Resolve the pytest class lazily to avoid an import cycle
        node = self.getparent(getattr(__import__("pytest"), name))
        return node.obj if node is not None else None
    doc = "python %s object this node was collected from (can be None)." % (
        name.lower(),
    )
    return property(get, None, None, doc)
def pytest_addoption(parser):
    """Register command-line options and ini settings for Python test
    collection and fixture display."""
    group = parser.getgroup("general")
    group.addoption(
        "--fixtures",
        "--funcargs",
        action="store_true",
        dest="showfixtures",
        default=False,
        help="show available fixtures, sorted by plugin appearance "
        "(fixtures with leading '_' are only shown with '-v')",
    )
    group.addoption(
        "--fixtures-per-test",
        action="store_true",
        dest="show_fixtures_per_test",
        default=False,
        help="show fixtures per test",
    )
    parser.addini(
        "python_files",
        type="args",
        # NOTE: default is also used in AssertionRewritingHook.
        default=["test_*.py", "*_test.py"],
        help="glob-style file patterns for Python test module discovery",
    )
    parser.addini(
        "python_classes",
        type="args",
        default=["Test"],
        help="prefixes or glob names for Python test class discovery",
    )
    parser.addini(
        "python_functions",
        type="args",
        default=["test"],
        help="prefixes or glob names for Python test function and method discovery",
    )
    parser.addini(
        "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
        type="bool",
        default=False,
        help="disable string escape non-ascii characters, might cause unwanted "
        "side effects(use at your own risk)",
    )
    group.addoption(
        "--import-mode",
        default="prepend",
        choices=["prepend", "append"],
        dest="importmode",
        help="prepend/append to sys.path when importing test modules, "
        "default is to prepend.",
    )
def pytest_cmdline_main(config):
    """Handle --fixtures / --fixtures-per-test: print the report and exit 0."""
    options = config.option
    if options.showfixtures:
        showfixtures(config)
        return 0
    if options.show_fixtures_per_test:
        show_fixtures_per_test(config)
        return 0
def pytest_generate_tests(metafunc):
    """Built-in parametrization hook: expand ``@pytest.mark.parametrize`` marks.

    Also detects the common misspellings of "parametrize" and fails collection
    with a helpful message instead of silently ignoring the mark.
    """
    for wrong_spelling in ("parameterize", "parametrise", "parameterise"):
        if metafunc.definition.get_closest_marker(wrong_spelling):
            fail(
                "{0} has '{1}' mark, spelling should be 'parametrize'".format(
                    metafunc.function.__name__, wrong_spelling
                ),
                pytrace=False,
            )
    for marker in metafunc.definition.iter_markers(name="parametrize"):
        metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
    """Register the markers provided by this plugin ("markers" ini lines)."""
    marker_docs = (
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see https://docs.pytest.org/en/latest/parametrize.html for more info "
        "and examples.",
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see "
        "https://docs.pytest.org/en/latest/fixture.html#usefixtures ",
    )
    for line in marker_docs:
        config.addinivalue_line("markers", line)
@hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
    """Default test-call hook: invoke the python test function with its fixtures.

    Coroutine functions are not natively supported: a warning is emitted and
    the test is skipped, pointing the user at a suitable async plugin.
    Returns True to tell the hook caller the test call was handled.
    """
    testfunction = pyfuncitem.obj
    # Be defensive: look the predicate up via getattr in case a stripped-down
    # inspect module is in use.
    iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None)
    if iscoroutinefunction is not None and iscoroutinefunction(testfunction):
        msg = "Coroutine functions are not natively supported and have been skipped.\n"
        msg += "You need to install a suitable plugin for your async framework, for example:\n"
        msg += " - pytest-asyncio\n"
        msg += " - pytest-trio\n"
        msg += " - pytest-tornasync"
        # FIX: the previous code called msg.format(pyfuncitem.nodeid), but msg
        # contains no "{}" placeholder, so the call was a no-op that silently
        # dropped the nodeid (and would raise if the message ever contained
        # literal braces). Warn with the message as-is.
        warnings.warn(PytestUnhandledCoroutineWarning(msg))
        skip(msg="coroutine function and no async plugin installed (see warnings)")
    funcargs = pyfuncitem.funcargs
    # Pass only the fixtures the test function actually declares as parameters.
    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
    testfunction(**testargs)
    return True
def pytest_collect_file(path, parent):
    """Collect ``.py`` files matching ``python_files`` (or listed as initial paths)."""
    if path.ext != ".py":
        return None
    session = parent.session
    if not session.isinitpath(path):
        # __init__.py is always eligible so packages can be collected
        accepted = parent.config.getini("python_files") + ["__init__.py"]
        if not path_matches_patterns(path, accepted):
            return None
    ihook = session.gethookproxy(path)
    return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def path_matches_patterns(path, patterns):
    """Returns True if the given py.path.local matches one of the patterns in the list of globs given"""
    for pattern in patterns:
        if path.fnmatch(pattern):
            return True
    return False
def pytest_pycollect_makemodule(path, parent):
    """Create the collector node for a python file: Package for __init__.py, Module otherwise."""
    collector_cls = Package if path.basename == "__init__.py" else Module
    return collector_cls(path, parent)
@hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Hook wrapper: build a collection node for ``obj`` if no other plugin did.

    Runs around the other pytest_pycollect_makeitem implementations and only
    fills in a result when none of them produced one.
    """
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        return
    # nothing was collected elsewhere, let's do it here
    if safe_isclass(obj):
        if collector.istestclass(obj, name):
            outcome.force_result(Class(name, parent=collector))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        # We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only)
        if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
            filename, lineno = getfslineno(obj)
            warnings.warn_explicit(
                message=PytestCollectionWarning(
                    "cannot collect %r because it is not a function." % name
                ),
                category=None,
                filename=str(filename),
                lineno=lineno + 1,
            )
        elif getattr(obj, "__test__", True):
            if is_generator(obj):
                # yield-tests are deprecated: collect but mark as non-running xfail
                res = Function(name, parent=collector)
                reason = deprecated.YIELD_TESTS.format(name=name)
                res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
                res.warn(PytestCollectionWarning(reason))
            else:
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
def pytest_make_parametrize_id(config, val, argname=None):
    """Fallback implementation: defer parametrize-id generation to pytest's defaults."""
    return None
class PyobjContext:
    """Mixin exposing the enclosing Module/Class/Instance python objects.

    Each attribute is a read-only property returning the underlying python
    object of the nearest parent collector of that type, or None.
    """

    module = pyobj_property("Module")
    cls = pyobj_property("Class")
    instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
    """Mixin for nodes that are backed by a python object (module/class/function)."""

    # When True, markers attached to the underlying python object are copied
    # onto this node the first time ``obj`` is resolved (see the hack below).
    _ALLOW_MARKERS = True

    def __init__(self, *k, **kw):
        super().__init__(*k, **kw)

    @property
    def obj(self):
        """Underlying Python object."""
        obj = getattr(self, "_obj", None)
        if obj is None:
            self._obj = obj = self._getobj()
            # XXX evil hack
            # used to avoid Instance collector marker duplication
            if self._ALLOW_MARKERS:
                self.own_markers.extend(get_unpacked_marks(self.obj))
        return obj

    @obj.setter
    def obj(self, value):
        self._obj = value

    def _getobj(self):
        """Gets the underlying Python object. May be overwritten by subclasses."""
        return getattr(self.parent.obj, self.name)

    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        # NOTE: this local list shadows the ``parts`` helper imported from
        # _pytest.pathlib at module level (which is not used in this method).
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                continue
            name = node.name
            if isinstance(node, Module):
                name = os.path.splitext(name)[0]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        s = ".".join(parts)
        # "mod.[x0]" -> "mod[x0]": parametrized parts attach without a dot
        return s.replace(".[", "[")

    def reportinfo(self):
        """Return (fspath, lineno, modpath) used for test reporting."""
        # XXX caching?
        obj = self.obj
        compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
        if isinstance(compat_co_firstlineno, int):
            # nose compatibility
            fspath = sys.modules[obj.__module__].__file__
            if fspath.endswith(".pyc"):
                fspath = fspath[:-1]
            lineno = compat_co_firstlineno
        else:
            fspath, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return fspath, lineno, modpath
class PyCollector(PyobjMixin, nodes.Collector):
    """Base class for collectors of python objects (modules, classes, instances)."""

    def funcnamefilter(self, name):
        # True if ``name`` matches the ``python_functions`` ini option.
        return self._matches_prefix_or_glob_option("python_functions", name)

    def isnosetest(self, obj):
        """ Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator
        """
        # We explicitly check for "is True" here to not mistakenly treat
        # classes with a custom __getattr__ returning something truthy (like a
        # function) as test classes.
        return safe_getattr(obj, "__test__", False) is True

    def classnamefilter(self, name):
        # True if ``name`` matches the ``python_classes`` ini option.
        return self._matches_prefix_or_glob_option("python_classes", name)

    def istestfunction(self, obj, name):
        """Decide if ``obj`` should be collected as a test function."""
        if self.funcnamefilter(name) or self.isnosetest(obj):
            if isinstance(obj, staticmethod):
                # static methods need to be unwrapped
                obj = safe_getattr(obj, "__func__", False)
            # must be callable and must not itself be a fixture definition
            return (
                safe_getattr(obj, "__call__", False)
                and fixtures.getfixturemarker(obj) is None
            )
        else:
            return False

    def istestclass(self, obj, name):
        """Decide if ``obj`` should be collected as a test class."""
        return self.classnamefilter(name) or self.isnosetest(obj)

    def _matches_prefix_or_glob_option(self, option_name, name):
        """
        checks if the given name matches the prefix or glob-pattern defined
        in ini configuration.
        """
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call
            elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
                name, option
            ):
                return True
        return False

    def collect(self):
        """Collect child nodes from the names defined on the python object."""
        if not getattr(self.obj, "__test__", True):
            return []
        # NB. we avoid random getattrs and peek in the __dict__ instead
        # (XXX originally introduced from a PyPy need, still true?)
        dicts = [getattr(self.obj, "__dict__", {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        values = []
        for dic in dicts:
            for name, obj in list(dic.items()):
                if name in seen:
                    continue
                seen[name] = True
                res = self._makeitem(name, obj)
                if res is None:
                    continue
                if not isinstance(res, list):
                    res = [res]
                values.extend(res)
        # sort by (fspath, lineno) so source definition order is preserved
        values.sort(key=lambda item: item.reportinfo()[:2])
        return values

    def _makeitem(self, name, obj):
        # assert self.ihook.fspath == self.fspath, self
        # delegate node creation to plugins via pytest_pycollect_makeitem
        return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj)

    def _genfunctions(self, name, funcobj):
        """Yield Function items for ``funcobj``, expanding parametrization."""
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        fm = self.session._fixturemanager
        definition = FunctionDefinition(name=name, parent=self, callobj=funcobj)
        fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls)
        metafunc = Metafunc(
            definition, fixtureinfo, self.config, cls=cls, module=module
        )
        # collect pytest_generate_tests implementations from the module and
        # class; if any exist they are called in addition to the hook chain
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        if methods:
            self.ihook.pytest_generate_tests.call_extra(
                methods, dict(metafunc=metafunc)
            )
        else:
            self.ihook.pytest_generate_tests(metafunc=metafunc)
        if not metafunc._calls:
            yield Function(name, parent=self, fixtureinfo=fixtureinfo)
        else:
            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
            # add_funcarg_pseudo_fixture_def may have shadowed some fixtures
            # with direct parametrization, so make sure we update what the
            # function really needs.
            fixtureinfo.prune_dependency_tree()
            for callspec in metafunc._calls:
                subname = "{}[{}]".format(name, callspec.id)
                yield Function(
                    name=subname,
                    parent=self,
                    callspec=callspec,
                    callobj=funcobj,
                    fixtureinfo=fixtureinfo,
                    keywords={callspec.id: True},
                    originalname=name,
                )
class Module(nodes.File, PyCollector):
    """ Collector for test classes and functions. """

    def _getobj(self):
        # the underlying python object of a Module is the imported module
        return self._importtestmodule()

    def collect(self):
        self._inject_setup_module_fixture()
        self._inject_setup_function_fixture()
        # register module-level fixture definitions before collecting items
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def _inject_setup_module_fixture(self):
        """Injects a hidden autouse, module scoped fixture into the collected module object
        that invokes setUpModule/tearDownModule if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        # prefer the unittest spelling, fall back to the pytest one
        setup_module = _get_non_fixture_func(self.obj, "setUpModule")
        if setup_module is None:
            setup_module = _get_non_fixture_func(self.obj, "setup_module")
        teardown_module = _get_non_fixture_func(self.obj, "tearDownModule")
        if teardown_module is None:
            teardown_module = _get_non_fixture_func(self.obj, "teardown_module")
        if setup_module is None and teardown_module is None:
            return

        @fixtures.fixture(autouse=True, scope="module")
        def xunit_setup_module_fixture(request):
            if setup_module is not None:
                _call_with_optional_argument(setup_module, request.module)
            yield
            if teardown_module is not None:
                _call_with_optional_argument(teardown_module, request.module)

        self.obj.__pytest_setup_module = xunit_setup_module_fixture

    def _inject_setup_function_fixture(self):
        """Injects a hidden autouse, function scoped fixture into the collected module object
        that invokes setup_function/teardown_function if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_function = _get_non_fixture_func(self.obj, "setup_function")
        teardown_function = _get_non_fixture_func(self.obj, "teardown_function")
        if setup_function is None and teardown_function is None:
            return

        @fixtures.fixture(autouse=True, scope="function")
        def xunit_setup_function_fixture(request):
            if request.instance is not None:
                # in this case we are bound to an instance, so we need to let
                # setup_method handle this
                yield
                return
            if setup_function is not None:
                _call_with_optional_argument(setup_function, request.function)
            yield
            if teardown_function is not None:
                _call_with_optional_argument(teardown_function, request.function)

        self.obj.__pytest_setup_function = xunit_setup_function_fixture

    def _importtestmodule(self):
        # we assume we are only called once per module
        importmode = self.config.getoption("--import-mode")
        try:
            mod = self.fspath.pyimport(ensuresyspath=importmode)
        except SyntaxError:
            raise self.CollectError(
                _pytest._code.ExceptionInfo.from_current().getrepr(style="short")
            )
        except self.fspath.ImportMismatchError:
            e = sys.exc_info()[1]
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                " %s\n"
                "which is not the same as the test file we want to collect:\n"
                " %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules" % e.args
            )
        except ImportError:
            from _pytest._code.code import ExceptionInfo

            exc_info = ExceptionInfo.from_current()
            # hide internal frames unless -vv or higher was given
            if self.config.getoption("verbose") < 2:
                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
            exc_repr = (
                exc_info.getrepr(style="short")
                if exc_info.traceback
                else exc_info.exconly()
            )
            formatted_tb = str(exc_repr)
            raise self.CollectError(
                "ImportError while importing test module '{fspath}'.\n"
                "Hint: make sure your test modules/packages have valid Python names.\n"
                "Traceback:\n"
                "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
            )
        except _pytest.runner.Skipped as e:
            if e.allow_module_level:
                raise
            # NOTE(review): the message below is missing the closing backtick
            # after "{skip,skipif}" -- fixing it would be a (tiny) behavior change.
            raise self.CollectError(
                "Using pytest.skip outside of a test is not allowed. "
                "To decorate a test function, use the @pytest.mark.skip "
                "or @pytest.mark.skipif decorators instead, and to skip a "
                "module use `pytestmark = pytest.mark.{skip,skipif}."
            )
        self.config.pluginmanager.consider_module(mod)
        return mod
class Package(Module):
    """Collector for a python package directory (a directory with __init__.py)."""

    def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
        # rooted at the package's __init__.py, but initialized like a
        # directory collector; session/config always come from the parent
        session = parent.session
        nodes.FSCollector.__init__(
            self, fspath, parent=parent, config=config, session=session, nodeid=nodeid
        )
        self.name = fspath.dirname
        self.trace = session.trace
        self._norecursepatterns = session._norecursepatterns
        self.fspath = fspath

    def setup(self):
        # not using fixtures to call setup_module here because autouse fixtures
        # from packages are not called automatically (#4085)
        setup_module = _get_non_fixture_func(self.obj, "setUpModule")
        if setup_module is None:
            setup_module = _get_non_fixture_func(self.obj, "setup_module")
        if setup_module is not None:
            _call_with_optional_argument(setup_module, self.obj)
        teardown_module = _get_non_fixture_func(self.obj, "tearDownModule")
        if teardown_module is None:
            teardown_module = _get_non_fixture_func(self.obj, "teardown_module")
        if teardown_module is not None:
            func = partial(_call_with_optional_argument, teardown_module, self.obj)
            self.addfinalizer(func)

    def _recurse(self, dirpath):
        """Decide whether ``dirpath`` is visited during directory traversal.

        Returns None (falsy) when a plugin asked to ignore the path.
        """
        if dirpath.basename == "__pycache__":
            return False
        ihook = self.gethookproxy(dirpath.dirpath())
        if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
            return
        for pat in self._norecursepatterns:
            if dirpath.check(fnmatch=pat):
                return False
        ihook = self.gethookproxy(dirpath)
        ihook.pytest_collect_directory(path=dirpath, parent=self)
        return True

    def gethookproxy(self, fspath):
        # check if we have the common case of running
        # hooks with all conftest.py files
        pm = self.config.pluginmanager
        my_conftestmodules = pm._getconftestmodules(fspath)
        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
        if remove_mods:
            # one or more conftests are not in use at this fspath
            proxy = FSHookProxy(fspath, pm, remove_mods)
        else:
            # all plugins are active for this fspath
            proxy = self.config.hook
        return proxy

    def _collectfile(self, path, handle_dupes=True):
        """Collect a single file inside the package, honoring ignore/dupe rules."""
        assert (
            path.isfile()
        ), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
            path, path.isdir(), path.exists(), path.islink()
        )
        ihook = self.gethookproxy(path)
        if not self.isinitpath(path):
            if ihook.pytest_ignore_collect(path=path, config=self.config):
                return ()
        if handle_dupes:
            keepduplicates = self.config.getoption("keepduplicates")
            if not keepduplicates:
                duplicate_paths = self.config.pluginmanager._duplicatepaths
                if path in duplicate_paths:
                    return ()
                else:
                    duplicate_paths.add(path)
        if self.fspath == path:  # __init__.py
            return [self]
        return ihook.pytest_collect_file(path=path, parent=self)

    def isinitpath(self, path):
        # True if ``path`` was given directly on the command line
        return path in self.session._initialpaths

    def collect(self):
        """Yield collectors for the package's own __init__.py and its contents."""
        this_path = self.fspath.dirpath()
        init_module = this_path.join("__init__.py")
        if init_module.check(file=1) and path_matches_patterns(
            init_module, self.config.getini("python_files")
        ):
            yield Module(init_module, self)
        pkg_prefixes = set()
        for path in this_path.visit(rec=self._recurse, bf=True, sort=True):
            # We will visit our own __init__.py file, in which case we skip it.
            is_file = path.isfile()
            if is_file:
                if path.basename == "__init__.py" and path.dirpath() == this_path:
                    continue
            # skip paths that live under an already-seen sub-package; the
            # sub-package's own collector is responsible for them
            parts_ = parts(path.strpath)
            if any(
                pkg_prefix in parts_ and pkg_prefix.join("__init__.py") != path
                for pkg_prefix in pkg_prefixes
            ):
                continue
            if is_file:
                yield from self._collectfile(path)
            elif not path.isdir():
                # Broken symlink or invalid/missing file.
                continue
            elif path.join("__init__.py").check(file=1):
                pkg_prefixes.add(path)
def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
    """
    Return a callable to perform xunit-style setup or teardown if
    the function exists in the ``holder`` object.

    The ``param_obj`` parameter is the parameter which will be passed to the function
    when the callable is called without arguments, defaults to the ``holder`` object.

    Return ``None`` if a suitable callable is not found.
    """
    # TODO: only needed because of Package!
    if param_obj is None:
        param_obj = holder
    func = _get_non_fixture_func(holder, attr_name)
    if func is None:
        return None
    positional = func.__code__.co_argcount
    if inspect.ismethod(func):
        positional -= 1  # discount the bound "self"
    if positional:
        # adapt a one-argument function into a zero-argument callable
        return lambda: func(param_obj)
    return func
def _call_with_optional_argument(func, arg):
"""Call the given function with the given argument if func accepts one argument, otherwise
calls func without arguments"""
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
if arg_count:
func(arg)
else:
func()
def _get_non_fixture_func(obj, name):
    """Return the attribute from the given object to be used as a setup/teardown
    xunit-style function, but only if not marked as a fixture to
    avoid calling it twice.
    """
    candidate = getattr(obj, name, None)
    if fixtures.getfixturemarker(candidate) is not None:
        return None
    return candidate
class Class(PyCollector):
    """ Collector for test methods. """

    def collect(self):
        """Return the Instance collector for the class, or [] if not collectable."""
        if not safe_getattr(self.obj, "__test__", True):
            return []
        # classes with a custom constructor cannot be instantiated by pytest,
        # so they are warned about and skipped
        if hasinit(self.obj):
            self.warn(
                PytestCollectionWarning(
                    "cannot collect test class %r because it has a "
                    "__init__ constructor (from: %s)"
                    % (self.obj.__name__, self.parent.nodeid)
                )
            )
            return []
        elif hasnew(self.obj):
            self.warn(
                PytestCollectionWarning(
                    "cannot collect test class %r because it has a "
                    "__new__ constructor (from: %s)"
                    % (self.obj.__name__, self.parent.nodeid)
                )
            )
            return []
        self._inject_setup_class_fixture()
        self._inject_setup_method_fixture()
        return [Instance(name="()", parent=self)]

    def _inject_setup_class_fixture(self):
        """Injects a hidden autouse, class scoped fixture into the collected class object
        that invokes setup_class/teardown_class if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_class = _get_non_fixture_func(self.obj, "setup_class")
        teardown_class = getattr(self.obj, "teardown_class", None)
        if setup_class is None and teardown_class is None:
            return

        @fixtures.fixture(autouse=True, scope="class")
        def xunit_setup_class_fixture(cls):
            if setup_class is not None:
                func = getimfunc(setup_class)
                _call_with_optional_argument(func, self.obj)
            yield
            if teardown_class is not None:
                func = getimfunc(teardown_class)
                _call_with_optional_argument(func, self.obj)

        self.obj.__pytest_setup_class = xunit_setup_class_fixture

    def _inject_setup_method_fixture(self):
        """Injects a hidden autouse, function scoped fixture into the collected class object
        that invokes setup_method/teardown_method if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_method = _get_non_fixture_func(self.obj, "setup_method")
        teardown_method = getattr(self.obj, "teardown_method", None)
        if setup_method is None and teardown_method is None:
            return

        @fixtures.fixture(autouse=True, scope="function")
        def xunit_setup_method_fixture(self, request):
            method = request.function
            if setup_method is not None:
                # look the method up on the instance so it arrives bound
                func = getattr(self, "setup_method")
                _call_with_optional_argument(func, method)
            yield
            if teardown_method is not None:
                func = getattr(self, "teardown_method")
                _call_with_optional_argument(func, method)

        self.obj.__pytest_setup_method = xunit_setup_method_fixture
class Instance(PyCollector):
    """Collector for the single instance of a test class (the "()" node)."""

    _ALLOW_MARKERS = False  # hack, destroy later
    # instances share the object with their parents in a way
    # that duplicates markers instances if not taken out
    # can be removed at node structure reorganization time

    def _getobj(self):
        # instantiate the test class collected by the parent Class node
        return self.parent.obj()

    def collect(self):
        # register fixtures defined on the instance before collecting methods
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def newinstance(self):
        """Create, cache and return a fresh instance of the test class."""
        self.obj = self._getobj()
        return self.obj
class FunctionMixin(PyobjMixin):
    """ mixin for the code common to Function and Generator.
    """

    def setup(self):
        """ perform setup for this test function. """
        if isinstance(self.parent, Instance):
            # each test method runs against a fresh instance of the test class
            self.parent.newinstance()
            self.obj = self._getobj()

    def _prunetraceback(self, excinfo):
        """Trim traceback entries so reports start at the test function."""
        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
            code = _pytest._code.Code(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            # progressively weaker cuts; fall back to the full traceback
            # if everything would be filtered away
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            excinfo.traceback = ntraceback.filter()
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame
            if self.config.getoption("tbstyle", "auto") == "auto":
                if len(excinfo.traceback) > 2:
                    for entry in excinfo.traceback[1:-1]:
                        entry.set_repr_style("short")

    def repr_failure(self, excinfo, outerr=None):
        """Return a representation of a collection or test failure."""
        assert outerr is None, "XXX outerr usage is deprecated"
        style = self.config.getoption("tbstyle", "auto")
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
def hasinit(obj):
    """Return whether *obj* defines an ``__init__`` different from ``object``'s.

    Note: historically returns None (falsy) instead of False when *obj* has
    no truthy ``__init__`` attribute; callers rely on truthiness only.
    """
    init = getattr(obj, "__init__", None)
    if not init:
        return None
    return init != object.__init__
def hasnew(obj):
    """Return whether *obj* defines a ``__new__`` different from ``object``'s.

    Note: historically returns None (falsy) instead of False when *obj* has
    no truthy ``__new__`` attribute; callers rely on truthiness only.
    """
    new = getattr(obj, "__new__", None)
    if not new:
        return None
    return new != object.__new__
class CallSpec2:
    """One concrete invocation of a parametrized test function.

    Accumulates the funcargs/fixture params, ids, marks and scopes contributed
    by (possibly multiple) ``parametrize()`` applications.
    """

    def __init__(self, metafunc):
        self.metafunc = metafunc
        # values passed directly as test function arguments
        self.funcargs = {}
        self._idlist = []
        # values routed to fixtures via request.param
        self.params = {}
        self._globalid = NOTSET
        self._globalparam = NOTSET
        self._arg2scopenum = {}  # used for sorting parametrized resources
        self.marks = []
        self.indices = {}

    def copy(self):
        """Return an independent copy, used to build the cartesian product."""
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs.marks.extend(self.marks)
        cs.indices.update(self.indices)
        cs._arg2scopenum.update(self._arg2scopenum)
        cs._idlist = list(self._idlist)
        cs._globalid = self._globalid
        cs._globalparam = self._globalparam
        return cs

    def _checkargnotcontained(self, arg):
        # guard against the same argname being parametrized twice
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate {!r}".format(arg))

    def getparam(self, name):
        """Return the param recorded for ``name``, falling back to the global param."""
        try:
            return self.params[name]
        except KeyError:
            if self._globalparam is NOTSET:
                raise ValueError(name)
            return self._globalparam

    @property
    def id(self):
        # joined id of all parametrize applications, e.g. "a0-b1"
        return "-".join(map(str, filter(None, self._idlist)))

    def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index):
        """Record one parameter set, routing each value to funcargs or params."""
        for arg, val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            # valtypes maps each arg to "funcargs" or "params"
            valtype_for_arg = valtypes[arg]
            getattr(self, valtype_for_arg)[arg] = val
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
        self._idlist.append(id)
        self.marks.extend(normalize_mark_list(marks))

    def setall(self, funcargs, id, param):
        """Record plain funcargs plus an optional global id/param (legacy path)."""
        for x in funcargs:
            self._checkargnotcontained(x)
        self.funcargs.update(funcargs)
        if id is not NOTSET:
            self._idlist.append(id)
        if param is not NOTSET:
            assert self._globalparam is NOTSET
            self._globalparam = param
        for arg in funcargs:
            self._arg2scopenum[arg] = fixtures.scopenum_function
class Metafunc(fixtures.FuncargnamesCompatAttr):
    """
    Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook.
    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.
    """

    def __init__(self, definition, fixtureinfo, config, cls=None, module=None):
        assert (
            isinstance(definition, FunctionDefinition)
            or type(definition).__name__ == "DefinitionMock"
        )
        self.definition = definition
        #: access to the :class:`_pytest.config.Config` object for the test session
        self.config = config
        #: the module object where the test function is defined in.
        self.module = module
        #: underlying python test function
        self.function = definition.obj
        #: set of fixture names required by the test function
        self.fixturenames = fixtureinfo.names_closure
        #: class object where the test function is defined in or ``None``.
        self.cls = cls
        # accumulated CallSpec2 objects, one per generated test invocation
        self._calls = []
        self._ids = set()
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

    def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None):
        """ Add new invocations to the underlying test function using the list
        of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to setup expensive resources
        see about setting indirect to do it rather at test setup time.

        :arg argnames: a comma-separated string denoting one or more argument
                       names, or a list/tuple of argument strings.

        :arg argvalues: The list of argvalues determines how often a
            test is invoked with different argument values. If only one
            argname was specified argvalues is a list of values. If N
            argnames were specified, argvalues must be a list of N-tuples,
            where each tuple-element specifies a value for its respective
            argname.

        :arg indirect: The list of argnames or boolean. A list of arguments'
            names (subset of argnames). If True the list contains all names from
            the argnames. Each argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :arg ids: list of string ids, or a callable.
            If strings, each is corresponding to the argvalues so that they are
            part of the test id. If None is given as id of specific test, the
            automatically generated id for that argument will be used.
            If callable, it should take one argument (a single argvalue) and return
            a string or return None. If None, the automatically generated id for that
            argument will be used.
            If no ids are provided they will be generated automatically from
            the argvalues.

        :arg scope: if specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        from _pytest.fixtures import scope2index
        from _pytest.mark import ParameterSet

        # normalize argnames/argvalues into a list of names and ParameterSets
        argnames, parameters = ParameterSet._for_parametrize(
            argnames,
            argvalues,
            self.function,
            self.config,
            function_definition=self.definition,
        )
        del argvalues

        if scope is None:
            scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

        self._validate_if_using_arg_names(argnames, indirect)

        arg_values_types = self._resolve_arg_value_types(argnames, indirect)

        ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition)

        scopenum = scope2index(
            scope, descr="parametrize() call in {}".format(self.function.__name__)
        )

        # create the new calls: if we are parametrize() multiple times (by applying the decorator
        # more than once) then we accumulate those calls generating the cartesian product
        # of all calls
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
            for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)):
                newcallspec = callspec.copy()
                newcallspec.setmulti2(
                    arg_values_types,
                    argnames,
                    param_set.values,
                    param_id,
                    param_set.marks,
                    scopenum,
                    param_index,
                )
                newcalls.append(newcallspec)
        self._calls = newcalls

    def _resolve_arg_ids(self, argnames, ids, parameters, item):
        """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given
        to ``parametrize``.

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param ids: the ids parameter of the parametrized call (see docs).
        :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``.
        :param Item item: the item that generated this parametrized call.
        :rtype: List[str]
        :return: the list of ids for each argname given
        """
        from _pytest._io.saferepr import saferepr

        idfn = None
        if callable(ids):
            # a callable produces per-value ids; handled inside idmaker
            idfn = ids
            ids = None
        if ids:
            # an explicit list of ids: validate length and element types
            func_name = self.function.__name__
            if len(ids) != len(parameters):
                msg = "In {}: {} parameter sets specified, with different number of ids: {}"
                fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False)
            for id_value in ids:
                if id_value is not None and not isinstance(id_value, str):
                    msg = "In {}: ids must be list of strings, found: {} (type: {!r})"
                    fail(
                        msg.format(func_name, saferepr(id_value), type(id_value)),
                        pytrace=False,
                    )
        ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item)
        return ids

    def _resolve_arg_value_types(self, argnames, indirect):
        """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg"
        to the function, based on the ``indirect`` parameter of the parametrized() call.

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param indirect: same ``indirect`` parameter of ``parametrize()``.
        :rtype: Dict[str, str]
            A dict mapping each arg name to either:
            * "params" if the argname should be the parameter of a fixture of the same name.
            * "funcargs" if the argname should be a parameter to the parametrized test function.
        """
        valtypes = {}
        if indirect is True:
            valtypes = dict.fromkeys(argnames, "params")
        elif indirect is False:
            valtypes = dict.fromkeys(argnames, "funcargs")
        elif isinstance(indirect, (tuple, list)):
            # only the listed argnames are routed through fixtures
            valtypes = dict.fromkeys(argnames, "funcargs")
            for arg in indirect:
                if arg not in argnames:
                    fail(
                        "In {}: indirect fixture '{}' doesn't exist".format(
                            self.function.__name__, arg
                        ),
                        pytrace=False,
                    )
                valtypes[arg] = "params"
        return valtypes

    def _validate_if_using_arg_names(self, argnames, indirect):
        """
        Check if all argnames are being used, by default values, or directly/indirectly.

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param indirect: same ``indirect`` parameter of ``parametrize()``.
        :raise ValueError: if validation fails.
        """
        default_arg_names = set(get_default_arg_names(self.function))
        func_name = self.function.__name__
        for arg in argnames:
            if arg not in self.fixturenames:
                if arg in default_arg_names:
                    fail(
                        "In {}: function already takes an argument '{}' with a default value".format(
                            func_name, arg
                        ),
                        pytrace=False,
                    )
                else:
                    if isinstance(indirect, (tuple, list)):
                        name = "fixture" if arg in indirect else "argument"
                    else:
                        name = "fixture" if indirect else "argument"
                    fail(
                        "In {}: function uses no {} '{}'".format(func_name, name, arg),
                        pytrace=False,
                    )
def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
    """Find the most appropriate scope for a parametrized call based on its arguments.

    When there's at least one direct argument, always use "function" scope.

    When a test function is parametrized and all its arguments are indirect
    (e.g. fixtures), return the most narrow scope based on the fixtures used.

    Related to issue #1832, based on code posted by @Kingdread.
    """
    from _pytest.fixtures import scopes

    if isinstance(indirect, (list, tuple)):
        everything_indirect = len(indirect) == len(argnames)
    else:
        everything_indirect = bool(indirect)
    if everything_indirect:
        # collect the scopes of all fixtures referenced by the argnames
        seen_scopes = {
            fixturedef[0].scope
            for name, fixturedef in (arg2fixturedefs or {}).items()
            if name in argnames
        }
        if seen_scopes:
            # takes the most narrow scope from the used fixtures
            for candidate in reversed(scopes):
                if candidate in seen_scopes:
                    return candidate
    return "function"
def _ascii_escaped_by_config(val, config):
    """Return `val` ascii-escaped for use in a test id, unless the ini option
    disabling escaping is set on `config`."""
    if config is None:
        return ascii_escaped(val)
    escaping_disabled = config.getini(
        "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
    )
    return val if escaping_disabled else ascii_escaped(val)
def _idval(val, argname, idx, idfn, item, config):
    """Build the id string for a single parametrized value.

    Resolution order: the user-supplied ``idfn`` (if any), then the
    ``pytest_make_parametrize_id`` hook, then type-based defaults, and
    finally the ``argname + idx`` fallback for opaque values.
    """
    if idfn:
        try:
            generated_id = idfn(val)
            # idfn may return None to decline; keep the original value then.
            if generated_id is not None:
                val = generated_id
        except Exception as e:
            # See issue https://github.com/pytest-dev/pytest/issues/2169
            msg = "{}: error raised while trying to determine id of parameter '{}' at position {}\n"
            msg = msg.format(item.nodeid, argname, idx)
            raise ValueError(msg) from e
    elif config:
        hook_id = config.hook.pytest_make_parametrize_id(
            config=config, val=val, argname=argname
        )
        if hook_id:
            return hook_id
    # Type-based defaults. Order matters: strings are checked before the
    # numeric branch, and bool is covered by (float, int, bool).
    if isinstance(val, STRING_TYPES):
        return _ascii_escaped_by_config(val, config)
    elif val is None or isinstance(val, (float, int, bool)):
        return str(val)
    elif isinstance(val, REGEX_TYPE):
        # Use the regex source text, escaped, rather than the pattern object.
        return ascii_escaped(val.pattern)
    elif enum is not None and isinstance(val, enum.Enum):
        # NOTE: the `enum is not None` guard suggests `enum` may be an
        # optional module reference here — confirm at the import site.
        return str(val)
    elif (inspect.isclass(val) or inspect.isfunction(val)) and hasattr(val, "__name__"):
        return val.__name__
    # Opaque object: fall back to positional naming, e.g. "arg0".
    return str(argname) + str(idx)
def _idvalset(idx, parameterset, argnames, idfn, ids, item, config):
    """Return the id for one parameter set.

    Priority: an explicit ``pytest.param(..., id=...)``, then a non-None
    entry in the user-provided ``ids`` list, then an id generated from the
    individual values.
    """
    if parameterset.id is not None:
        return parameterset.id
    user_provided = (
        ids is not None and idx < len(ids) and ids[idx] is not None
    )
    if user_provided:
        return _ascii_escaped_by_config(ids[idx], config)
    parts = [
        _idval(val, argname, idx, idfn, item=item, config=config)
        for val, argname in zip(parameterset.values, argnames)
    ]
    return "-".join(parts)
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
    """Return one unique test id per parameter set.

    Ids are resolved per-set by ``_idvalset``; duplicated ids are made
    unique by appending an occurrence index ("0", "1", ...) to each of
    their occurrences.
    """
    resolved_ids = [
        _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
        for valindex, parameterset in enumerate(parametersets)
    ]
    if len(set(resolved_ids)) != len(resolved_ids):
        # The ids are not unique. Counter gives O(n) duplicate detection
        # (the previous ids.count() per element was O(n^2)).
        id_counts = collections.Counter(resolved_ids)
        suffixes = collections.defaultdict(int)
        for index, testid in enumerate(resolved_ids):
            if id_counts[testid] > 1:
                resolved_ids[index] = testid + str(suffixes[testid])
                suffixes[testid] += 1
    return resolved_ids
def show_fixtures_per_test(config):
    """Implement the fixtures-per-test command line report: run a session
    that prints, for each collected test, the fixtures it uses."""
    from _pytest.main import wrap_session

    return wrap_session(config, _show_fixtures_per_test)
def _show_fixtures_per_test(config, session):
    """Collect the session and print the fixtures used by every test item."""
    import _pytest.config

    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")

    def get_best_relpath(func):
        # Location of `func`'s source, relative to the current directory.
        loc = getlocation(func, curdir)
        return curdir.bestrelpath(loc)

    def write_fixture(fixture_def):
        # Print one fixture's name (plus location when verbose) and docstring.
        argname = fixture_def.argname
        if verbose <= 0 and argname.startswith("_"):
            # Private fixtures are hidden unless -v was given.
            return
        if verbose > 0:
            bestrel = get_best_relpath(fixture_def.func)
            funcargspec = "{} -- {}".format(argname, bestrel)
        else:
            funcargspec = argname
        tw.line(funcargspec, green=True)
        fixture_doc = fixture_def.func.__doc__
        if fixture_doc:
            write_docstring(tw, fixture_doc)
        else:
            tw.line(" no docstring available", red=True)

    def write_item(item):
        # Print the header and every fixture used by one collected item.
        try:
            info = item._fixtureinfo
        except AttributeError:
            # doctests items have no _fixtureinfo attribute
            return
        if not info.name2fixturedefs:
            # this test item does not use any fixtures
            return
        tw.line()
        tw.sep("-", "fixtures used by {}".format(item.name))
        tw.sep("-", "({})".format(get_best_relpath(item.function)))
        # dict key not used in loop but needed for sorting
        for _, fixturedefs in sorted(info.name2fixturedefs.items()):
            assert fixturedefs is not None
            if not fixturedefs:
                continue
            # last item is expected to be the one used by the test item
            write_fixture(fixturedefs[-1])

    for session_item in session.items:
        write_item(session_item)
def showfixtures(config):
    """Implement the ``--fixtures`` command line report: run a session that
    prints every fixture available, grouped by defining module."""
    from _pytest.main import wrap_session

    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
    """Collect the session and print every registered fixture with its scope,
    location and docstring."""
    import _pytest.config

    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")

    fm = session._fixturemanager

    available = []
    seen = set()

    # Gather every fixture definition known to the fixture manager,
    # de-duplicated by (name, source location).
    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        for fixturedef in fixturedefs:
            loc = getlocation(fixturedef.func, curdir)
            if (fixturedef.argname, loc) in seen:
                continue
            seen.add((fixturedef.argname, loc))
            available.append(
                (
                    len(fixturedef.baseid),
                    fixturedef.func.__module__,
                    curdir.bestrelpath(loc),
                    fixturedef.argname,
                    fixturedef,
                )
            )

    # Sorting the tuples groups fixtures by module for the report below.
    available.sort()
    currentmodule = None
    for baseid, module, bestrel, argname, fixturedef in available:
        if currentmodule != module:
            if not module.startswith("_pytest."):
                # New (non-internal) module: print a section separator.
                tw.line()
                tw.sep("-", "fixtures defined from {}".format(module))
                currentmodule = module
        if verbose <= 0 and argname[0] == "_":
            # Private fixtures are hidden unless -v was given.
            continue
        tw.write(argname, green=True)
        if fixturedef.scope != "function":
            tw.write(" [%s scope]" % fixturedef.scope, cyan=True)
        if verbose > 0:
            tw.write(" -- %s" % bestrel, yellow=True)
        tw.write("\n")
        loc = getlocation(fixturedef.func, curdir)
        doc = fixturedef.func.__doc__ or ""
        if doc:
            write_docstring(tw, doc)
        else:
            tw.line(" {}: no docstring available".format(loc), red=True)
        tw.line()
def write_docstring(tw, doc, indent=" "):
    """Write a docstring to the terminal writer: the stripped first line,
    then the remaining lines dedented, everything prefixed with `indent`."""
    doc = doc.rstrip()
    firstline, _, rest = doc.partition("\n")
    summary = firstline.strip()
    if summary:
        tw.line(indent + summary)
    if rest:
        for line in dedent(rest).split("\n"):
            tw.write(indent + line + "\n")
class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """

    # disable since functions handle it themselves
    _ALLOW_MARKERS = False

    def __init__(
        self,
        name,
        parent,
        args=None,
        config=None,
        callspec=None,
        callobj=NOTSET,
        keywords=None,
        session=None,
        fixtureinfo=None,
        originalname=None,
    ):
        """Initialize the item.

        ``callobj`` overrides the test callable normally looked up on the
        parent (``NOTSET`` means "look it up lazily via ``_getobj``);
        ``callspec`` carries parametrization values and marks; a
        pre-computed ``fixtureinfo`` may be passed to skip re-resolution.
        """
        super().__init__(name, parent, config=config, session=session)
        self._args = args
        if callobj is not NOTSET:
            self.obj = callobj

        # Propagate attributes/markers set directly on the test callable
        # into this item's keywords and own markers.
        self.keywords.update(self.obj.__dict__)
        self.own_markers.extend(get_unpacked_marks(self.obj))
        if callspec:
            self.callspec = callspec
            # this is total hostile and a mess
            # keywords are broken by design by now
            # this will be redeemed later
            for mark in callspec.marks:
                # feel free to cry, this was broken for years before
                # and keywords cant fix it per design
                self.keywords[mark.name] = mark
            self.own_markers.extend(normalize_mark_list(callspec.marks))
        if keywords:
            self.keywords.update(keywords)

        # todo: this is a hell of a hack
        # https://github.com/pytest-dev/pytest/issues/4569
        # Mirror every marker name into keywords so `-k`/`-m` matching works.
        self.keywords.update(
            dict.fromkeys(
                [
                    mark.name
                    for mark in self.iter_markers()
                    if mark.name not in self.keywords
                ],
                True,
            )
        )

        if fixtureinfo is None:
            # Resolve the closure of fixtures this function needs.
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self, self.obj, self.cls, funcargs=True
            )
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

        #: original function name, without any decorations (for example
        #: parametrization adds a ``"[...]"`` suffix to function names).
        #:
        #: .. versionadded:: 3.0
        self.originalname = originalname

    def _initrequest(self):
        """Create the empty funcarg mapping and the FixtureRequest for this item."""
        self.funcargs = {}
        self._request = fixtures.FixtureRequest(self)

    @property
    def function(self):
        "underlying python 'function' object"
        return getimfunc(self.obj)

    def _getobj(self):
        # Strip the "[...]" parametrization suffix (if any) to recover the
        # attribute name of the underlying function on the parent object.
        name = self.name
        i = name.find("[")  # parametrization
        if i != -1:
            name = name[:i]
        return getattr(self.parent.obj, name)

    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self

    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self):
        """Run standard item setup, then fill this item's fixtures."""
        super().setup()
        fixtures.fillfixtures(self)
class FunctionDefinition(Function):
    """Internal placeholder node for a test function definition.

    A stop-gap until real definition nodes replace the metafunc hack;
    instances must never actually be set up or executed.
    """

    def runtest(self):
        raise RuntimeError("function definitions are not supposed to be used")

    def setup(self):
        # Same contract as runtest: this node type is never executed.
        raise RuntimeError("function definitions are not supposed to be used")
| |
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive Moving Average model."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.sts.components.autoregressive import make_ar_transition_matrix
from tensorflow_probability.python.sts.internal import util as sts_util
class AutoregressiveMovingAverageStateSpaceModel(
    tfd.LinearGaussianStateSpaceModel):
  """State space model for an autoregressive moving average process.

  A state space model (SSM) posits a set of latent (unobserved) variables that
  evolve over time with dynamics specified by a probabilistic transition model
  `p(z[t+1] | z[t])`. At each timestep, we observe a value sampled from an
  observation model conditioned on the current state, `p(x[t] | z[t])`. The
  special case where both the transition and observation models are Gaussians
  with mean specified as a linear function of the inputs, is known as a linear
  Gaussian state space model and supports tractable exact probabilistic
  calculations; see `tfp.distributions.LinearGaussianStateSpaceModel` for
  details.

  In an autoregressive moving average (ARMA) process, the expected level at
  each timestep is a linear function of previous levels, with added Gaussian
  noise, and a linear function of previous Gaussian noise:

  ```python
  level[t + 1] = (
    level_drift
    + noise[t + 1]
    + sum(ar_coefficients * levels[t:t-order:-1])
    + sum(ma_coefficients * noise[t:t-order:-1]))
  noise[t + 1] ~ Normal(0., scale=level_scale)
  ```

  The process is characterized by a vector `coefficients` whose size
  determines the order of the process (how many previous values it looks at),
  and by `level_scale`, the standard deviation of the noise added at each
  step.

  This is formulated as a state space model by letting the latent state encode
  the most recent values; see 'Mathematical Details' below.

  The parameters `level_scale` and `observation_noise_scale` are each (a batch
  of) scalars, and `coefficients` is a (batch) vector of size `[order]`. The
  batch shape of this `Distribution` is the broadcast batch
  shape of these parameters and of the `initial_state_prior`.

  #### Mathematical Details

  The Hamilton autoregressive moving average model implements a
  `tfp.distributions.LinearGaussianStateSpaceModel` with `latent_size = order`
  and `observation_size = 1`. The latent state vector encodes the recent
  history of the process, with the current value in the topmost dimension. At
  each timestep, the transition sums the previous values to produce the new
  expected value, shifts all other values down by a dimension, and adds noise
  to the current value. This is formally encoded by the transition model:

  ```
  transition_matrix = [ ar_coefs[0], ar_coefs[1], ..., ar_coefs[p]
                        1.,          0,           ..., 0.
                        0.,          1,           ..., 0.
                        ...
                        0.,          0.,  ...,    1., 0. ]
  transition_noise ~ N(loc=level_drift / (1. + sum(ma_coefficients)),
                       scale=diag([level_scale, 0., 0., ..., 0.]))
  ```

  The observation model simply extracts the current (topmost) value,
  sums the previous noise and optionally adds independent noise at each step:

  ```
  observation_matrix = [1, ma_coefs[0], ma_coefs[1], ..., ma_coefs[p-1]]
  observation_noise ~ N(loc=0, scale=observation_noise_scale)
  ```

  Models with `observation_noise_scale = 0.` are ARMA(p, p-1) processes
  in the formal sense. Setting `observation_noise_scale` to a nonzero value
  corresponds to a latent ARMA(p, p-1) process observed under an iid noise
  model.

  #### References

  [1] James D. Hamilton. State-space models. __Handbook of Econometrics,
      Volume IV__ (1994): 3039-3080.
      http://web.pdx.edu/~crkl/readings/Hamilton94.pdf
  """

  def __init__(self,
               num_timesteps,
               ar_coefficients,
               ma_coefficients,
               level_scale,
               initial_state_prior,
               level_drift=0.,
               observation_noise_scale=0.,
               name=None,
               **linear_gaussian_ssm_kwargs):
    """Builds a state space model implementing an ARMA(p, p - 1) process.

    Args:
      num_timesteps: Scalar `int` `Tensor` number of timesteps to model
        with this distribution.
      ar_coefficients: `float` `Tensor` of shape `concat(batch_shape,
        [order])` defining the autoregressive coefficients. The
        ar_coefficients are defined
        backwards in time: `ar_coefficients[0] * level[t] +
        ar_coefficients[1] * level[t-1] + ... +
        ar_coefficients[order-1] * level[t-order+1]`.
      ma_coefficients: `float` `Tensor` of shape `concat(batch_shape,
        [order])` defining the moving average coefficients. The
        ma_coefficients are defined
        backwards in time: `noise[t] + ma_coefficients[0] * noise[t-1] +
        ... + ma_coefficients[order-2] * noise[t-order+1]`.
      level_scale: Scalar (any additional dimensions are treated as batch
        dimensions) `float` `Tensor` indicating the standard deviation of
        the transition noise at each step.
      initial_state_prior: instance of `tfd.MultivariateNormal`
        representing the prior distribution on latent states. Must have
        event shape `[order]`.
      level_drift: Scalar (any additional dimensions are
        treated as batch dimensions) `float` `Tensor` indicating a
        deterministic drift added to the level at each step.
        Default value: 0.
      observation_noise_scale: Scalar (any additional dimensions are
        treated as batch dimensions) `float` `Tensor` indicating the
        standard deviation of the observation noise.
        Default value: 0.
      name: Python `str` name prefixed to ops created by this class.
        Default value: "ARMAStateSpaceModel".
      **linear_gaussian_ssm_kwargs: Optional additional keyword arguments
        to pass to the base `tfd.LinearGaussianStateSpaceModel`
        constructor.

    Notes: This distribution is always represented as an ARMA(p, p - 1)
      process internally where q = p - 1 due to 'Mathematical Details'
      above. If q + 1 != p is desired, then either `ar_coefficients` or
      `ma_coefficients` will be automatically padded with zeros by the
      required amount to become an ARMA(p, p - 1) process.
    """
    parameters = dict(locals())
    parameters.update(linear_gaussian_ssm_kwargs)
    del parameters['linear_gaussian_ssm_kwargs']
    with tf.name_scope(name or 'ARMAStateSpaceModel') as name:
      # The initial state prior determines the dtype of sampled values.
      # Other model parameters must have the same dtype.
      dtype = initial_state_prior.dtype
      ar_coefficients = tf.convert_to_tensor(
          value=ar_coefficients, name='ar_coefficients', dtype=dtype)
      ma_coefficients = tf.convert_to_tensor(
          value=ma_coefficients, name='ma_coefficients', dtype=dtype)
      level_scale = tf.convert_to_tensor(
          value=level_scale, name='level_scale', dtype=dtype)
      level_drift = tf.convert_to_tensor(
          value=level_drift, name='level_drift', dtype=dtype)
      observation_noise_scale = tf.convert_to_tensor(
          value=observation_noise_scale,
          name='observation_noise_scale',
          dtype=dtype)

      # Canonicalize as ARMA[order, order - 1], where order = max(p, q + 1),
      # by zero-padding whichever coefficient vector is too short.
      ar_order = ps.shape(ar_coefficients)[-1]
      ma_order = ps.shape(ma_coefficients)[-1]
      order = ps.maximum(ar_order, ma_order + 1)
      ar_coefficients = sts_util.pad_tensor_with_trailing_zeros(
          ar_coefficients, order - ar_order)
      ma_coefficients = sts_util.pad_tensor_with_trailing_zeros(
          ma_coefficients, (order - 1) - ma_order)

      self._order = order
      self._ar_coefficients = ar_coefficients
      self._ma_coefficients = ma_coefficients
      self._level_scale = level_scale
      self._level_drift = level_drift
      self._observation_noise_scale = observation_noise_scale

      # Ensure the prior's shape matches the padded order.
      prior_event_dimension = tf.compat.dimension_value(
          initial_state_prior.event_shape[-1])
      if (prior_event_dimension is not None and
          prior_event_dimension != self.order):
        raise ValueError('prior event dimension needs to match max(p, q + 1). '
                         f'prior event dimension: {prior_event_dimension}, '
                         f'max(p, q + 1): {self.order}.')

      # Incorporate drift in the latent process, divided by `1 + sum(ma_coefs)`
      # to preserve the magnitude of the effect on the observed series.
      # TODO(b/211794069): Handle `level_drift` stably when sum(ma_coefs) ~= -1.
      # NOTE(review): the guard below tests `level_drift == 0`, not
      # `ma_factor == 0`; with nonzero drift and sum(ma_coefs) == -1 this
      # still divides by zero — presumably deferred to the TODO above.
      ma_factor = 1. + tf.reduce_sum(ma_coefficients, axis=-1)
      latent_level_drift = level_drift / tf.where(
          # Prevent blowup in the drift-free case.
          tf.equal(level_drift, 0.), tf.ones_like(ma_factor), ma_factor)
      super(AutoregressiveMovingAverageStateSpaceModel, self).__init__(
          num_timesteps=num_timesteps,
          transition_matrix=make_ar_transition_matrix(ar_coefficients),
          transition_noise=tfd.MultivariateNormalDiag(
              loc=sts_util.pad_tensor_with_trailing_zeros(
                  latent_level_drift[..., tf.newaxis],
                  self.order - 1),
              scale_diag=sts_util.pad_tensor_with_trailing_zeros(
                  level_scale[..., tf.newaxis], self.order - 1)),
          observation_matrix=make_ma_observation_matrix(ma_coefficients),
          observation_noise=tfd.MultivariateNormalDiag(
              scale_diag=observation_noise_scale[..., tf.newaxis]),
          initial_state_prior=initial_state_prior,
          name=name,
          **linear_gaussian_ssm_kwargs)
      self._parameters = parameters

  @property
  def order(self):
    # Canonical latent dimension p of the ARMA(p, p - 1) representation.
    return self._order

  @property
  def ar_coefficients(self):
    # Autoregressive coefficients, zero-padded to length `order`.
    return self._ar_coefficients

  @property
  def ma_coefficients(self):
    # Moving average coefficients, zero-padded to length `order - 1`.
    return self._ma_coefficients

  @property
  def level_drift(self):
    return self._level_drift

  @property
  def level_scale(self):
    return self._level_scale

  @property
  def observation_noise_scale(self):
    return self._observation_noise_scale
def make_ma_observation_matrix(coefficients):
  """Build observation matrix for a moving average StateSpaceModel.

  When applied in the observation equation, this row vector extracts the
  current (topmost) value and then takes a linear combination of previous
  noise values that were added during previous recursive steps:

  ```
  observation_matrix = [1, theta[0], theta[1], ..., theta[p-1]]
  ```

  To ensure broadcasting with the transition matrix, we return a shape of:
  `concat([batch_shape, [1, order]])`

  Args:
    coefficients: float `Tensor` of shape `concat([batch_shape, [order - 1]])`.

  Returns:
    ma_matrix: float `Tensor` with shape `concat([batch_shape, [1, order]])`.
  """
  batch_shape = ps.shape(coefficients)[:-1]
  # Leading `1.` entry that selects the current (topmost) latent value.
  leading_one = tf.ones(
      ps.concat([batch_shape, [1, 1]], axis=0), dtype=coefficients.dtype)
  # Insert a row dimension so the coefficients form a [1, order - 1] row.
  ma_row = coefficients[..., tf.newaxis, :]
  return tf.concat([leading_one, ma_row], axis=-1)
| |
"""HEC-RAS Output variables
-------------------------------------------------------------------------------
| Name | ID | Description |
-------------------------------------------------------------------------------
PROFILE 1 Profile number.
WS_ELEV 2 Calculated water surface from energy equation.
EG_ELEV 3 Energy gradeline for given WSEL.
MAX_CHL_DPTH 4 Maximum main channel depth.
MIN_CH_EL 5 Minimum channel elevation.
Q_LEFT 6 Flow in left overbank.
Q_CHANNEL 7 Flow in main channel.
Q_RIGHT 8 Flow in right overbank.
Q_TOTAL 9 Total flow in cross section.
FLOW_AREA 10 Total area of cross section active flow.
FLOW_AREA_L 11 Area of left overbank active flow.
FLOW_AREA_CH 12 Area of main channel active flow.
FLOW_AREA_R 13 Area of right overbank active flow.
WP_TOTAL 14 Wetted perimeter of total cross section.
WP_LEFT 15 Wetted perimeter of left overbank.
WP_CHANNEL 16 Wetted perimeter of main channel.
WP_RIGHT 17 Wetted perimeter of right overbank.
CONV_TOTAL 18 Conveyance of total cross section.
CONV_LEFT 19 Conveyance of left overbank.
CONV_CHNL 20 Conveyance of main channel.
CONV_RIGHT 21 Conveyance of right overbank.
VEL_HEAD 22 Velocity head.
VEL_TOTAL 23 Average velocity of flow in total cross section.
VEL_LEFT 24 Average velocity of flow in left overbank.
VEL_CHNL 25 Average velocity of flow in main channel.
VEL_RIGHT 26 Average velocity of flow in right overbank.
ALPHA 27 Alpha - energy weighting coefficient.
BETA 28 Beta - momentum weighting coefficient.
TOP_WDTH_ACT 29 Top width of the wetted cross section, not including
ineffective flow.
EG_SLOPE 30 Slope of the energy grade line at a cross section.
VOLUME 31 Cumulative volume of water from the downstream end
of the reach (including ineffective areas).
AREA 32 Flow area of the entire cross section including
ineffective flow.
AREA_LEFT 33 Flow area of the left overbank including ineffective
flow.
AREA_CHANNEL 34 Flow area of the main channel including ineffective
flow.
AREA_RIGHT 35 Flow area of the right overbank including
ineffective flow.
STA_WS_LFT 36 Left station where water intersects the ground.
STA_WS_RGT 37 Right station where water intersects the ground.
LEFT_STA_EFF 38 Furthest left station where there is effective flow.
RGHT_STA_EFF 39 Furthest right station that still has effective
flow.
LENGTH_WTD 40 Weighted length based on flow distribution, in left
bank, channel, and right bank.
LENGTH_LEFT 41 Downstream reach length of the left overbank.
LENGTH_CHNL 42 Downstream reach length of the main channel to next
XS (unless BR is d/s, then this is the distance to
the deck/roadway).
LENGTH_RGHT 43 Downstream reach length of the right overbank.
MANN_WTD_LEFT 44 Conveyance weighted Manning's n for the left
overbank.
MANN_WTD_CHNL 45 Conveyance weighted Manning's n for the main
channel.
MANN_WTD_RGHT 46 Conveyance weighted Manning's n for the right
overbank.
MANN_COMP 47 Mannings n value for main channel based on composite
roughness equation.
FROUDE_N_CHL 48 Froude number for the main channel.
FROUDE_N_XS 49 Froude number for the entire cross section.
TRVL_TME_AVG 50 Cumulative travel time based on the average velocity
of the entire cross section per reach.
TRVL_TME_CHL 51 Cumulative travel time based on the average velocity
of the main channel per reach.
CONV_RATIO 52 Ratio of the conveyance of the current cross
section to the conveyance of the downstream cross
section.
SPECIF_FORCE 53 The specific force for this cross section at the
computed water surface elevation.
SPC_FORCE_PR 54 Specific force prime. For mixed flow, the specific
force at this cross section for the flow regime that
does not control.
WS_PRIME 55 Water surface prime. For mixed flow, the water
surface of the flow regime that does not control.
CRIT_WS 56 Critical water surface elevation. Water surface
corresponding to the minimum energy on the energy
versus depth curve.
CRIT_EG 57 Critical energy elevation. Minimum energy on the
energy versus depth curve.
CRIT_DEPTH 58 Critical depth. Corresponds to critical water
surface.
FRCTN_LOSS 59 Friction loss between two cross sections.
C_E_LOSS 60 Contraction or expansion loss between two cross
sections.
HEADLOSS 61 Total energy loss between two cross sections.
TOP_WIDTH 62 Top width of the wetted cross section.
TOP_W_LEFT 63 Top width of the left overbank. Does not include
`islands', but it does include ineffective flow.
TOP_W_CHNL 64 Top width of the main channel. Does not include
`islands', but it does include ineffective flow.
TOP_W_RIGHT 65 Top width of the right overbank. Does not include
`islands', but it does include ineffective flow.
NUM_TRIALS 66 Current number (or final number) of trials attempted
before the energy equation is balanced.
STD_STP_CASE 67 Standard step method used to determine WSEL (1 =
successful convergence, 2 = minimum error,
3 = resorted to critical depth).
FRCTN_SLOPE 68 Representative friction slope between two cross
sections.
FRCTN_SLP_MD 69 Friction slope averaging method used.
MIN_ERROR 70 The minimum error, between the calculated and
assumed water surfaces when balancing the energy
equation.
DELTA_WS 71 Change in water surface through culvert(s) and
Bridge(s).
DELTA_EG 72 Change in energy grade line through culvert(s) and
Bridge(s).
Q_CULV_GROUP 73 Flow through all barrels in a culvert.
Q_BARREL 74 Flow through one barrel in a culvert.
WS_US 75 Upstream water surface elevation upstream of bridge,
culvert or weir (specific to that opening, not
necessarily the energy weighted average).
CLV_EG_NO_WR 76 Energy grade elevation at the culvert that was
calculated without the weir.
EG_US 77 Upstream energy grade elevation at bridge or culvert
(specific to that opening, not necessarily the
weighted average).
EG_IC 78 Upstream energy gradeline based on inlet control.
EG_OC 79 Upstream energy gradeline based on outlet control.
CULV_NML_DEPTH 80 Normal depth for this culvert (and flow).
CULV_VEL_DS 81 Velocity in culvert at defined downstream.
CULV_VEL_US 82 Velocity in culvert at defined upstream.
CULV_FRCTN_LS 83 Friction loss through the culvert.
CULV_ENTR_LOSS 84 Entrance loss (energy loss due only to entrance).
CULV_EXIT_LOSS 85 Exit loss (energy loss due to exit).
CULV_FULL_LEN 86 The length that the culvert flows full.
CULV_CRT_DEPTH 87 Critical depth inside the culvert.
CULV_INV_EL_UP 88 Culvert invert elevation upstream.
CULV_INV_EL_DN 89 Culvert invert elevation downstream.
CULV_EG_INLET 90 Energy gradeline inside the culvert at the inlet.
CULV_EG_OUTLET 91 Energy gradeline inside the culvert at the outlet.
CULV_WS_INLET 92 Water surface elevation inside the culvert at the
inlet.
CULV_WS_OUTLET 93 Water surface elevation inside the culvert at the
outlet.
Q_WEIR 94 Flow over the weir.
WEIR_FLOW_AREA 95 Area of the flow going over the weir.
WEIR_STA_LFT 96 Station where flow starts on the left side.
WEIR_STA_RGT 97 Station where flow ends on the right side.
WEIR_MAX_DEPTH 98 The maximum depth over the weir.
WEIR_AVG_DEPTH 99 The average depth over the weir.
WEIR_SUBMERG 100 The ratio of the downstream depth above the weir to
the upstream depth above the weir.
MIN_EL_WEIR_FLOW 101 Elevation where weir flow begins.
WR_TOP_WDTH 102 Top width of water over the weir.
ENERGY_WR_WS 103 Water surface elevation upstream of bridge for low
flow energy method and weir flow.
YARNELL_WS 104 Water surface elevation upstream of bridge for
Yarnell method.
WSPRO_WS 105 Water surface elevation upstream of bridge for the
WSPRO method.
PRS_WR_WS_ 106 Water surface elevation upstream of bridge for
pressure and/or weir method.
ENERGY_WS 107 Water surface elevation upstream of bridge for
energy only method.
MOMEN_WS 108 Water surface elevation upstream of bridge for
momentum method.
PRS_O_WS 109 Water surface elevation upstream of bridge for
pressure only method.
ENERGY_WR_EG 110 Energy grade elevation upstream of bridge for energy
method.
YARNELL_EG 111 Energy grade elevation upstream of bridge for
Yarnell method.
WSPRO_EG 112 Energy grade elevation upstream of bridge for the
WSPRO method.
PRS_WR_EG 113 Energy grade elevation upstream of bridge for
pressure and/or weir method.
ENERGY_EG 114 Energy grade elevation upstream of bridge for energy
only method.
MOMEN_EG 115 Energy grade elevation upstream of bridge for
momentum method.
PRS_O_EG 116 Energy grade elevation upstream of bridge for
pressure only method.
BR_SEL_METHOD 117 Selected bridge method.
MIN_EL_PRS 118 Elevation at the bridge when pressure flow begins.
CRIT_NUM 119 Number of critical depths found.
CRIT_WS_1 120 Water surface elevation of first critical depth.
CRIT_WS_2 121 Water surface elevation of second critical depth.
CRIT_WS_3 122 Water surface elevation of third critical depth.
CRIT_ENRGY_1 123 Energy associated with first critical depth.
CRIT_ENRGY_2 124 Energy associated with second critical depth.
CRIT_ENRGY_3 125 Energy associated with third critical depth.
HYDR_DEPTH 126 Hydraulic depth for cross section.
HYDR_DEPTH_L 127 Hydraulic depth in left over bank.
HYDR_DEPTH_C 128 Hydraulic depth in channel.
HYDR_DEPTH_R 129 Hydraulic depth for right over bank.
DECK_WIDTH 130 Width of Deck.
N_BARRELS 131 Number of barrels in a culvert.
Q_BRIDGE 132 Flow through a bridge opening.
VOL_LEFT 133 Cumulative volume of water in the left overbank from
the downstream end of the reach (including
ineffective areas).
VOL_CHAN 134 Cumulative volume of water in the channel from the
downstream end of the reach (including ineffective
areas).
VOL_RIGHT 135 Cumulative volume of water in the right overbank
from the downstream end of the reach (including
ineffective areas).
MIN_EL 136 Minimum overall section elevation.
ENC_VAL_1 137 Target for encroachment analysis.
ENC_VAL_2 138 Second target for encroachment analysis.
ENC_STA_L 139 Left station of encroachment.
ENC_STA_R 140 Right station of encroachment.
DIST_CENTER_L 141 Distance from center of channel to left
encroachment.
DIST_CENTER_R 142 Distance from center of channel to right
encroachment.
K_PERC_L 143 Conveyance reduction from left encroachment.
K_PERC_R 144 Conveyance reduction from right encroachment.
Q_PERC_L 145 Percent of flow in left overbank.
Q_PERC_CHAN 146 Percent of flow in main channel.
Q_PERC_R 147 Percent of flow in right overbank.
PROF_DELTA_WS 148 Difference in WS between current profile and WS for
first profile.
PROF_DELTA_EG 149 Difference in EG between current profile and EG for
first profile.
SHEAR_TOTAL 150 Shear stress in total section.
SHEAR_LOB 151 Shear stress in left overbank.
SHEAR_CHAN 152 Shear stress in main channel.
SHEAR_ROB 153 Shear stress in right overbank.
POWER_TOTAL 154 Total stream power.
POWER_LOB 155 Total stream power in left overbank.
POWER_CHAN 156 Total stream power in main channel.
POWER_ROB 157 Total stream power in right overbank.
CH_STA_L 158 Left station of channel.
CH_STA_R 159 Right station of channel.
BASE_WS 160 Water surface for first profile (used in comparison
of encroachments).
CENTER_STATION 161 Center station of main channel.
XS_DELTA_WS 162 Change in water surface between current section and
next one downstream.
XS_DELTA_EG 163 Change in energy gradeline between current section
and next one downstream.
SA_TOTAL 164 Cumulative surface area for entire cross section
(including ineffective areas) from the downstream
end of the reach.
SA_LEFT 165 Cumulative surface area for left overbank (including
ineffective areas) from the downstream end of the
reach.
SA_CHAN 166 Cumulative surface area for main channel (including
ineffective areas) from the downstream end of the
reach.
SA_RIGHT 167 Cumulative surface area for right overbank
(including ineffective areas) from the downstream
end of the reach.
ENC_METHOD 168 Encroachment method.
Q_GATE_GROUP 169 Flow through all gate openings in a gate group.
GATE_OPEN_HT 170 Height of gate opening.
GATE_NOPEN 171 The number of gates opened in the current group.
GATE_AREA 172 The flow area in an opened gate.
GATE_SUBMERG 173 The ratio of the downstream depth above the gate to
the upstream depth above the gate.
GATE_INVERT 174 Gate spillway invert elevation.
Q_GATES 175 Total flow through all of the gate groups of an
inline/lateral structure.
BR_OPEN_AREA 176 Total area of the entire bridge opening.
COEF_OF_Q 177 WSPRO bridge method coefficient of discharge.
CUM_CH_LEN 178 Cumulative Channel Length from the downstream end of
the reach.
ENC_WD 179 Encroachment Width.
OBS_WS 180 Observed Water Surface.
WS_AIR_ENTR 181 Water surface elevation accounting for air
entrainment.
BR_OPEN_VEL 182 Average velocity inside the bridge opening (Maximum
of BU and BD).
ICE_THICK_LOB 183 Ice thickness in the left overbank.
ICE_THICK_CHAN 184 Ice thickness in the main channel.
ICE_THICK_ROB 185 Ice thickness in the right overbank.
ICE_VOL_TOTAL 186 Cummulative volume of ice in an ice jam.
ICE_VOL_LOB 187 Cummulative volume of ice in the left overbank for
an ice jam.
ICE_VOL_CHAN 188 Cummulative volume of ice in the main channel for an
ice jam.
ICE_VOL_ROB 189 Cummulative volume of ice in the right overbank for
an ice jam.
ICE_TOP_LOB 190 The top elevation of ice in the left overbank.
ICE_TOP_CHAN 191 The top elevation of ice in the main channel.
ICE_TOP_ROB 192 The top elevation of ice in the right overbank.
ICE_BTM_LOB 193 The bottom elevation of ice in the left overbank.
ICE_BTM_CHAN 194 The bottom elevation of ice in the main channel.
ICE_BTM_ROB 195 The bottom elevation of ice in the right overbank.
INVERT_SLOPE 196 The slope from the invert of this cross section to
the next cross section downstream.
LOB_ELEV 197 The ground elevation at the left bank of the main
channel.
ROB_ELEV 198 The ground elevation at the right bank of the main
channel.
L_FREEBOARD 199 The freeboard in the main channel at the left bank.
R_FREEBOARD 200 The freeboard in the main channel at the right bank.
LEVEE_EL_LEFT 201 The elevation of the left levee.
LEVEE_EL_RIGHT 202 The elevation of the right levee.
INEFF_EL_LEFT 203 The elevation of the left ineffective area.
INEFF_EL_RIGHT 204 The elevation of the right ineffective area.
L_LEVEE_FRBRD 205 The freeboard before the left levee is over-topped.
R_LEVEE_FRBRD 206 The freeboard before the right levee is over-topped.
MANN_WTD_TOTAL 207 Mannings n value for the total main cross section.
HYDR_RADIUS 208 Hydraulic radius for cross section.
HYDR_RADIUS_L 209 Hydraulic radius in left over bank.
HYDR_RADIUS_C 210 Hydraulic radius in channel.
HYDR_RADIUS_R 211 Hydraulic radius for right over bank.
HYDR_RAD_2_3 212 Hydraulic radius for cross section to the 2/3 power.
WS_DS 213 Water surface downstream.
EG_DS 214 Energy elevation downstream.
MIN_WEIR_EL 215 Minimum weir elevation.
PERC_Q_LEAVING 216 Percentage of flow leaving through a lateral
structure.
Q_US 217 Flow in cross section upstream of a lateral
structure.
Q_DS 218 Flow in cross section downstream of lateral
structure.
WEIR_STA_US 219 Upstream station for weir flow starts.
WEIR_STA_DS 220 Downstream station where weir flow ends.
Q_LEAVING_TOTAL 221 Total flow leaving in a lateral structure including
all gates, culverts and lateral rating curves.
SA_MIN_EL 222 Minimum elevation of a storage area.
SA_AREA 223 Surface area of a storage area.
SA_VOLUME 224 Storage volume of a storage area.
TOP_W_ACT_LEFT 225 Top width of the wetted left bank, not including
ineffective flow.
TOP_W_ACT_CHAN 226 Top width of the wetted channel, not including
ineffective flow.
TOP_W_ACT_RIGHT 227 Top width of the wetted right bank, not including
ineffective flow.
CULV_DEPTH_BLOCKED 228 Depth of fill in a culvert.
CULV_INLET_MANN_N 229 The composite n value at the culvert inlet.
CULV_OUTLET_MANN_N 230 The composite n value at the culvert outlet.
ICE_WS_ERR 231 Convergence error in water surface for dynamic ice
jam.
ICE_ERR 232 Convergence error in ice thickness for dynamic ice
jam.
PIPING_FLOW 233 Flow from piping weir failure.
BREACH_CL 234 Center line of weir breach.
BREACH_WD 235 Bottom width of weir breach.
BREACH_BOTTOM_EL 236 Bottom Elevation of weir breach.
BREACH_TOP_EL 237 Top Elevation of weir breach.
BREACH_SSL 238 Left side slope of weir breach.
BREACH_SSR 239 Right side slope of weir breach.
Q_PUMP_GROUP 240 Pump group flow.
Q_LAT_RC 241 Lateral rating curve flow.
Q_CULV 242 Total flow in all culvert groups.
CULV_LENGTH 243 Length of the culvert barrel.
Q_PUMP_STATION 244 Total flow in all pump groups in a pump station.
WS_INLET 245 WS at the inlet of a pump station.
WS_OUTLET 246 WS at the outlet of a pump station.
PUMPING_HEAD 247 Pumping head for the pump station.
INFLOW 248 Total inflow into a storage area.
OUTFLOW 249 Total outflow into a storage area.
NET_FLUX 250 Net inflow - outflow for a storage area.
ENC_OFFSET_L 251 Minimum setback from the left overbank station.
ENC_OFFSET_R 252 Minimum setback from the right overbank station.
MIN_CH_PILOT 253 Minimum channel elevation (including pilot
channels).
DIFF 254 Difference between the previous two columns.
MIN_CH_EL_STA 255 Station of the minimum channel elevation.
CULV_AREA_DS 256 Cross sectional flow area in culvert at defined
downstream.
CULV_AREA_US 257 Cross sectional flow area in culvert at defined
upstream.
GATE_WEIR_COEF 258 Coefficient used in weir flow over the gate.
WEIR_COEF 259 Coefficient used in weir flow.
Q_BREACH 260 Flow through a breach.
BREACH_AVG_VELOCITY 261 Average flow velocity through a breach.
BREACH_FLOW_AREA 262 Flow area through a breach.
LEFT_STATION 263 Left station of the cross section.
RIGHT_STATION 264 Right station of the cross section.
LEVEE_STA_LEFT 265 Left levee station.
LEVEE_STA_RIGHT 266 Right levee station.
Q_INLINE_RC 267 Inline Outlet rating curve flow.
Q_OUTLET_TS 268 Inline/Lateral Outlet time series flow.
"""
import sys
# Column indices into the HEC-RAS output table, one module-level constant
# per variable described in the docstring table above.  Names are bound in
# docstring order, starting at 1 (PROFILE) and ending at 268 (Q_OUTLET_TS).
_COLUMN_NAMES = (
    'PROFILE', 'WS_ELEV', 'EG_ELEV', 'MAX_CHL_DPTH', 'MIN_CH_EL',
    'Q_LEFT', 'Q_CHANNEL', 'Q_RIGHT', 'Q_TOTAL', 'FLOW_AREA',
    'FLOW_AREA_L', 'FLOW_AREA_CH', 'FLOW_AREA_R', 'WP_TOTAL', 'WP_LEFT',
    'WP_CHANNEL', 'WP_RIGHT', 'CONV_TOTAL', 'CONV_LEFT', 'CONV_CHNL',
    'CONV_RIGHT', 'VEL_HEAD', 'VEL_TOTAL', 'VEL_LEFT', 'VEL_CHNL',
    'VEL_RIGHT', 'ALPHA', 'BETA', 'TOP_WDTH_ACT', 'EG_SLOPE',
    'VOLUME', 'AREA', 'AREA_LEFT', 'AREA_CHANNEL', 'AREA_RIGHT',
    'STA_WS_LFT', 'STA_WS_RGT', 'LEFT_STA_EFF', 'RGHT_STA_EFF', 'LENGTH_WTD',
    'LENGTH_LEFT', 'LENGTH_CHNL', 'LENGTH_RGHT', 'MANN_WTD_LEFT',
    'MANN_WTD_CHNL',
    'MANN_WTD_RGHT', 'MANN_COMP', 'FROUDE_N_CHL', 'FROUDE_N_XS',
    'TRVL_TME_AVG',
    'TRVL_TME_CHL', 'CONV_RATIO', 'SPECIF_FORCE', 'SPC_FORCE_PR', 'WS_PRIME',
    'CRIT_WS', 'CRIT_EG', 'CRIT_DEPTH', 'FRCTN_LOSS', 'C_E_LOSS',
    'HEADLOSS', 'TOP_WIDTH', 'TOP_W_LEFT', 'TOP_W_CHNL', 'TOP_W_RIGHT',
    'NUM_TRIALS', 'STD_STP_CASE', 'FRCTN_SLOPE', 'FRCTN_SLP_MD', 'MIN_ERROR',
    'DELTA_WS', 'DELTA_EG', 'Q_CULV_GROUP', 'Q_BARREL', 'WS_US',
    'CLV_EG_NO_WR', 'EG_US', 'EG_IC', 'EG_OC', 'CULV_NML_DEPTH',
    'CULV_VEL_DS', 'CULV_VEL_US', 'CULV_FRCTN_LS', 'CULV_ENTR_LOSS',
    'CULV_EXIT_LOSS',
    'CULV_FULL_LEN', 'CULV_CRT_DEPTH', 'CULV_INV_EL_UP', 'CULV_INV_EL_DN',
    'CULV_EG_INLET',
    'CULV_EG_OUTLET', 'CULV_WS_INLET', 'CULV_WS_OUTLET', 'Q_WEIR',
    'WEIR_FLOW_AREA',
    'WEIR_STA_LFT', 'WEIR_STA_RGT', 'WEIR_MAX_DEPTH', 'WEIR_AVG_DEPTH',
    'WEIR_SUBMERG',
    'MIN_EL_WEIR_FLOW', 'WR_TOP_WDTH', 'ENERGY_WR_WS', 'YARNELL_WS',
    'WSPRO_WS',
    'PRS_WR_WS_', 'ENERGY_WS', 'MOMEN_WS', 'PRS_O_WS', 'ENERGY_WR_EG',
    'YARNELL_EG', 'WSPRO_EG', 'PRS_WR_EG', 'ENERGY_EG', 'MOMEN_EG',
    'PRS_O_EG', 'BR_SEL_METHOD', 'MIN_EL_PRS', 'CRIT_NUM', 'CRIT_WS_1',
    'CRIT_WS_2', 'CRIT_WS_3', 'CRIT_ENRGY_1', 'CRIT_ENRGY_2', 'CRIT_ENRGY_3',
    'HYDR_DEPTH', 'HYDR_DEPTH_L', 'HYDR_DEPTH_C', 'HYDR_DEPTH_R',
    'DECK_WIDTH',
    'N_BARRELS', 'Q_BRIDGE', 'VOL_LEFT', 'VOL_CHAN', 'VOL_RIGHT',
    'MIN_EL', 'ENC_VAL_1', 'ENC_VAL_2', 'ENC_STA_L', 'ENC_STA_R',
    'DIST_CENTER_L', 'DIST_CENTER_R', 'K_PERC_L', 'K_PERC_R', 'Q_PERC_L',
    'Q_PERC_CHAN', 'Q_PERC_R', 'PROF_DELTA_WS', 'PROF_DELTA_EG',
    'SHEAR_TOTAL',
    'SHEAR_LOB', 'SHEAR_CHAN', 'SHEAR_ROB', 'POWER_TOTAL', 'POWER_LOB',
    'POWER_CHAN', 'POWER_ROB', 'CH_STA_L', 'CH_STA_R', 'BASE_WS',
    'CENTER_STATION', 'XS_DELTA_WS', 'XS_DELTA_EG', 'SA_TOTAL', 'SA_LEFT',
    'SA_CHAN', 'SA_RIGHT', 'ENC_METHOD', 'Q_GATE_GROUP', 'GATE_OPEN_HT',
    'GATE_NOPEN', 'GATE_AREA', 'GATE_SUBMERG', 'GATE_INVERT', 'Q_GATES',
    'BR_OPEN_AREA', 'COEF_OF_Q', 'CUM_CH_LEN', 'ENC_WD', 'OBS_WS',
    'WS_AIR_ENTR', 'BR_OPEN_VEL', 'ICE_THICK_LOB', 'ICE_THICK_CHAN',
    'ICE_THICK_ROB',
    'ICE_VOL_TOTAL', 'ICE_VOL_LOB', 'ICE_VOL_CHAN', 'ICE_VOL_ROB',
    'ICE_TOP_LOB',
    'ICE_TOP_CHAN', 'ICE_TOP_ROB', 'ICE_BTM_LOB', 'ICE_BTM_CHAN',
    'ICE_BTM_ROB',
    'INVERT_SLOPE', 'LOB_ELEV', 'ROB_ELEV', 'L_FREEBOARD', 'R_FREEBOARD',
    'LEVEE_EL_LEFT', 'LEVEE_EL_RIGHT', 'INEFF_EL_LEFT', 'INEFF_EL_RIGHT',
    'L_LEVEE_FRBRD',
    'R_LEVEE_FRBRD', 'MANN_WTD_TOTAL', 'HYDR_RADIUS', 'HYDR_RADIUS_L',
    'HYDR_RADIUS_C',
    'HYDR_RADIUS_R', 'HYDR_RAD_2_3', 'WS_DS', 'EG_DS', 'MIN_WEIR_EL',
    'PERC_Q_LEAVING', 'Q_US', 'Q_DS', 'WEIR_STA_US', 'WEIR_STA_DS',
    'Q_LEAVING_TOTAL', 'SA_MIN_EL', 'SA_AREA', 'SA_VOLUME', 'TOP_W_ACT_LEFT',
    'TOP_W_ACT_CHAN', 'TOP_W_ACT_RIGHT', 'CULV_DEPTH_BLOCKED',
    'CULV_INLET_MANN_N', 'CULV_OUTLET_MANN_N',
    'ICE_WS_ERR', 'ICE_ERR', 'PIPING_FLOW', 'BREACH_CL', 'BREACH_WD',
    'BREACH_BOTTOM_EL', 'BREACH_TOP_EL', 'BREACH_SSL', 'BREACH_SSR',
    'Q_PUMP_GROUP',
    'Q_LAT_RC', 'Q_CULV', 'CULV_LENGTH', 'Q_PUMP_STATION', 'WS_INLET',
    'WS_OUTLET', 'PUMPING_HEAD', 'INFLOW', 'OUTFLOW', 'NET_FLUX',
    'ENC_OFFSET_L', 'ENC_OFFSET_R', 'MIN_CH_PILOT', 'DIFF', 'MIN_CH_EL_STA',
    'CULV_AREA_DS', 'CULV_AREA_US', 'GATE_WEIR_COEF', 'WEIR_COEF',
    'Q_BREACH',
    'BREACH_AVG_VELOCITY', 'BREACH_FLOW_AREA', 'LEFT_STATION',
    'RIGHT_STATION', 'LEVEE_STA_LEFT',
    'LEVEE_STA_RIGHT', 'Q_INLINE_RC', 'Q_OUTLET_TS',
)
# Bind each name as a module-level integer constant (1-based, in order).
for _position, _column_name in enumerate(_COLUMN_NAMES, start=1):
    globals()[_column_name] = _position
del _position, _column_name
def print_help(sort_alpha=False):
    """Print a table of all the available variable names.

    The table is read from this module's docstring, which lists each
    variable name with its column number and description.

    Parameters
    ----------
    sort_alpha : bool, optional
        True to order output table alphabetically.
    """
    current_module = sys.modules[__name__]
    doc = current_module.__doc__
    if sort_alpha:
        # The table body starts 3 lines after the '-'*79 separator line in
        # the module docstring header.
        break_ = '-'*79
        start_table = doc.split('\n').index(break_) + 3
        content = doc.split('\n')[start_table:-1]
        # Append two sentinel lines so the two-line look-ahead below never
        # runs past the end of the table.
        last = [' ---']
        content += last*2
        names = []
        dic = {} # varname, lines
        i = 0
        while True:
            line = content[i]
            # Variable names start at column 2; wrapped description lines
            # are indented further (column 2 is a space).
            name = line[2:].split(' ')[0]
            line_next = content[i+1]
            line_next_next = content[i+2]
            if line_next[2] != ' ':
                # Entry occupies a single line.
                i += 1
                lines = [line]
            elif line_next[2] == ' ' and line_next_next[2] != ' ':
                # Entry wraps onto a second line.
                i += 2
                lines = [line, line_next]
            elif line_next[2] == ' ' and line_next_next[2] == ' ':
                # Entry wraps onto a third line.
                i += 3
                lines = [line, line_next, line_next_next]
            dic[name] = lines
            names.append(name)
            if i >= len(content) - 2:
                break
        names = sorted(names)
        # Re-emit the docstring header followed by the entries in
        # alphabetical order.
        content = doc.split('\n')[:start_table]
        for name in names:
            content += dic[name]
        print('\n' .join(content))
    else:
        print(doc)
if __name__ == '__main__':
    # When run as a script, show the table twice: docstring order first,
    # then alphabetical order.
    for _alphabetical in (False, True):
        print_help(sort_alpha=_alphabetical)
| |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import os
import re
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import quota
# Configuration options controlling API paging limits and the link
# prefixes presented to API consumers.
osapi_opts = [
    cfg.IntOpt('osapi_max_limit',
               default=1000,
               help='The maximum number of items returned in a single '
                    'response from a collection resource'),
    cfg.StrOpt('osapi_compute_link_prefix',
               help='Base URL that will be presented to users in links '
                    'to the OpenStack Compute API'),
    cfg.StrOpt('osapi_glance_link_prefix',
               help='Base URL that will be presented to users in links '
                    'to glance resources'),
]
CONF = cfg.CONF
CONF.register_opts(osapi_opts)
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
# Make CONF.cells.enable resolvable from this module (used by
# check_cells_enabled below).
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
# NOTE(cyeoh): A common regexp for acceptable names (user supplied)
# that we want all new extensions to conform to unless there is a very
# good reason not to.
# NOTE: raw string so '\w' reaches the regex engine verbatim; the previous
# non-raw literal relied on Python passing the unknown escape through,
# which emits a DeprecationWarning (SyntaxWarning on newer Pythons).
VALID_NAME_REGEX = re.compile(r"^(?! )[\w. _-]+(?<! )$", re.UNICODE)
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
# Maps a vm_state to a {task_state: API status string} dict.  The special
# 'default' key supplies the status used when the instance's current
# task_state has no specific mapping.
_STATE_MAP = {
    vm_states.ACTIVE: {
        'default': 'ACTIVE',
        task_states.REBOOTING: 'REBOOT',
        task_states.REBOOT_PENDING: 'REBOOT',
        task_states.REBOOT_STARTED: 'REBOOT',
        task_states.REBOOTING_HARD: 'HARD_REBOOT',
        task_states.REBOOT_PENDING_HARD: 'HARD_REBOOT',
        task_states.REBOOT_STARTED_HARD: 'HARD_REBOOT',
        task_states.UPDATING_PASSWORD: 'PASSWORD',
        task_states.REBUILDING: 'REBUILD',
        task_states.REBUILD_BLOCK_DEVICE_MAPPING: 'REBUILD',
        task_states.REBUILD_SPAWNING: 'REBUILD',
        task_states.MIGRATING: 'MIGRATING',
        task_states.RESIZE_PREP: 'RESIZE',
        task_states.RESIZE_MIGRATING: 'RESIZE',
        task_states.RESIZE_MIGRATED: 'RESIZE',
        task_states.RESIZE_FINISH: 'RESIZE',
    },
    vm_states.BUILDING: {
        'default': 'BUILD',
    },
    vm_states.STOPPED: {
        'default': 'SHUTOFF',
        task_states.RESIZE_PREP: 'RESIZE',
        task_states.RESIZE_MIGRATING: 'RESIZE',
        task_states.RESIZE_MIGRATED: 'RESIZE',
        task_states.RESIZE_FINISH: 'RESIZE',
    },
    vm_states.RESIZED: {
        'default': 'VERIFY_RESIZE',
        # Note(maoy): the OS API spec 1.1 doesn't have CONFIRMING_RESIZE
        # state so we comment that out for future reference only.
        #task_states.RESIZE_CONFIRMING: 'CONFIRMING_RESIZE',
        task_states.RESIZE_REVERTING: 'REVERT_RESIZE',
    },
    vm_states.PAUSED: {
        'default': 'PAUSED',
    },
    vm_states.SUSPENDED: {
        'default': 'SUSPENDED',
    },
    vm_states.RESCUED: {
        'default': 'RESCUE',
    },
    vm_states.ERROR: {
        'default': 'ERROR',
    },
    vm_states.DELETED: {
        'default': 'DELETED',
    },
    vm_states.SOFT_DELETED: {
        'default': 'SOFT_DELETED',
    },
    vm_states.SHELVED: {
        'default': 'SHELVED',
    },
    vm_states.SHELVED_OFFLOADED: {
        'default': 'SHELVED_OFFLOADED',
    },
}
def status_from_state(vm_state, task_state='default'):
    """Given vm_state and task_state, return a status string."""
    # Unknown vm_states fall back to a one-entry map yielding 'UNKNOWN'.
    mapping = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
    status = mapping.get(task_state, mapping['default'])
    if status == "UNKNOWN":
        LOG.error(_LE("status is UNKNOWN from vm_state=%(vm_state)s "
                      "task_state=%(task_state)s. Bad upgrade or db "
                      "corrupted?"),
                  {'vm_state': vm_state, 'task_state': task_state})
    return status
def task_and_vm_state_from_status(statuses):
    """Map the server's multiple status strings to list of vm states and
    list of task states.

    :param statuses: iterable of status strings (case-insensitive)
    :returns: (sorted list of vm_states, sorted list of task_states)
    """
    vm_states = set()
    task_states = set()
    lower_statuses = [status.lower() for status in statuses]
    # NOTE: dict.items() rather than the Python-2-only iteritems() so this
    # also works on Python 3 (the sort comment below shows py3 is a target).
    for state, task_map in _STATE_MAP.items():
        for task_state, mapped_state in task_map.items():
            if mapped_state.lower() in lower_statuses:
                vm_states.add(state)
                task_states.add(task_state)
    # Add sort to avoid different order on set in Python 3
    return sorted(vm_states), sorted(task_states)
def get_sort_params(input_params, default_key='created_at',
                    default_dir='desc'):
    """Retrieve sort keys/directions parameters.

    Every occurrence of 'sort_key' and 'sort_dir' in *input_params* is
    collected (stripped of surrounding whitespace) into the returned lists,
    so the parameters may be repeated to build multi-key orderings.  When no
    'sort_key'/'sort_dir' values are supplied, the corresponding default
    (if truthy) is used instead.  *input_params* itself is never modified.

    :param input_params: webob.multidict of request parameters (from
                         nova.wsgi.Request.params)
    :param default_key: default sort key value, added to the list if no
                        'sort_key' parameters are supplied
    :param default_dir: default sort dir value, added to the list if no
                        'sort_dir' parameters are supplied
    :returns: list of sort keys, list of sort dirs
    """
    params = input_params.copy()

    def _drain(name):
        # Pop every value for *name* off the (multi)dict copy, in order.
        values = []
        while name in params:
            values.append(params.pop(name).strip())
        return values

    sort_keys = _drain('sort_key')
    sort_dirs = _drain('sort_dir')
    if not sort_keys and default_key:
        sort_keys.append(default_key)
    if not sort_dirs and default_dir:
        sort_dirs.append(default_dir)
    return sort_keys, sort_dirs
def get_pagination_params(request):
    """Return marker, limit tuple from request.

    :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
                    GET variables. 'marker' is the id of the last element
                    the client has seen, and 'limit' is the maximum number
                    of items to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either marker or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.
    """
    params = {}
    # Both integer parameters share the same validation path.
    for name in ('limit', 'page_size'):
        if name in request.GET:
            params[name] = _get_int_param(request, name)
    if 'marker' in request.GET:
        params['marker'] = _get_marker_param(request)
    return params
def _get_int_param(request, param):
    """Extract integer param from request or fail."""
    raw_value = request.GET[param]
    try:
        value = int(raw_value)
    except ValueError:
        msg = _('%s param must be an integer') % param
        raise webob.exc.HTTPBadRequest(explanation=msg)
    if value < 0:
        msg = _('%s param must be positive') % param
        raise webob.exc.HTTPBadRequest(explanation=msg)
    return value
def _get_marker_param(request):
    """Extract marker id from request or fail."""
    # A missing 'marker' key propagates as KeyError to the caller; callers
    # are expected to check 'marker' in request.GET first.
    return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
    """Return a slice of items according to requested offset and limit.

    :param items: A sliceable entity
    :param request: ``wsgi.Request`` possibly containing 'offset' and
                    'limit' GET variables. 'offset' is where to start in
                    the list, and 'limit' is the maximum number of items
                    to return. If 'limit' is not specified, 0, or
                    > max_limit, we default to max_limit. Negative values
                    for either offset or limit will cause
                    exc.HTTPBadRequest() exceptions to be raised.
    :kwarg max_limit: The maximum number of items to return from 'items'
    """
    try:
        offset = int(request.GET.get('offset', 0))
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('offset param must be an integer'))
    try:
        limit = int(request.GET.get('limit', max_limit))
    except ValueError:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be an integer'))
    if limit < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('limit param must be positive'))
    if offset < 0:
        raise webob.exc.HTTPBadRequest(
            explanation=_('offset param must be positive'))
    # A zero (or absent) limit means "use the maximum page size"; the
    # requested limit is also capped at max_limit.
    limit = min(max_limit, limit or max_limit)
    return items[offset:offset + limit]
def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
    """get limited parameter from request."""
    params = get_pagination_params(request)
    # Cap the requested limit at max_limit; marker defaults to None.
    limit = min(max_limit, params.get('limit', max_limit))
    return limit, params.get('marker')
def get_id_from_href(href):
    """Return the id or uuid portion of a url.

    Given: 'http://www.foo.com/bar/123?q=4'
    Returns: '123'

    Given: 'http://www.foo.com/bar/abc123?q=4'
    Returns: 'abc123'
    """
    # Coerce to str first so non-string hrefs (e.g. ints) are accepted.
    path = urlparse.urlsplit(str(href)).path
    return path.rsplit('/', 1)[-1]
def remove_version_from_href(href):
    """Removes the first api version from the href.

    Given: 'http://www.nova.com/v1.1/123'
    Returns: 'http://www.nova.com/123'

    Given: 'http://www.nova.com/v1.1'
    Returns: 'http://www.nova.com'
    """
    split = urlparse.urlsplit(href)
    parts = split.path.split('/', 2)
    # NOTE: this should match vX.X or vX
    version_re = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
    if version_re.match(parts[1]):
        parts.pop(1)
    new_path = '/'.join(parts)
    if new_path == split.path:
        LOG.debug('href %s does not contain version' % href)
        raise ValueError(_('href %s does not contain version') % href)
    rebuilt = list(split)
    rebuilt[2] = new_path
    return urlparse.urlunsplit(rebuilt)
def check_img_metadata_properties_quota(context, metadata):
    """Validate image metadata against quota and key constraints.

    :raises: webob.exc.HTTPForbidden when the item count exceeds quota;
             webob.exc.HTTPBadRequest when metadata is not a dict or a
             key is blank or longer than 255 characters.
    """
    if not metadata:
        return
    try:
        QUOTAS.limit_check(context, metadata_items=len(metadata))
    except exception.OverQuota:
        expl = _("Image metadata limit exceeded")
        raise webob.exc.HTTPForbidden(explanation=expl)
    # check the key length.
    if isinstance(metadata, dict):
        # NOTE: iterate keys directly instead of the Python-2-only
        # iteritems(); the values were never used.
        for key in metadata:
            if len(key) == 0:
                expl = _("Image metadata key cannot be blank")
                raise webob.exc.HTTPBadRequest(explanation=expl)
            if len(key) > 255:
                expl = _("Image metadata key too long")
                raise webob.exc.HTTPBadRequest(explanation=expl)
    else:
        expl = _("Invalid image metadata")
        raise webob.exc.HTTPBadRequest(explanation=expl)
def dict_to_query_str(params):
    """Serialize *params* into a 'key=value&key2=value2' query string.

    Keys and values are converted with str() and are NOT url-encoded
    (see TODO below).
    """
    # TODO(throughnothing): we should just use urllib.urlencode instead of this
    # But currently we don't work with urlencoded url's
    # NOTE: dict.items() (not the Python-2-only iteritems()) so this works
    # on Python 3 as well; join avoids building then stripping a trailing
    # '&'.
    return '&'.join('='.join([str(key), str(val)])
                    for key, val in params.items())
def get_networks_for_instance_from_nw_info(nw_info):
    """Group the fixed and floating IPs in *nw_info* by network label.

    Each IP dict is also tagged with the MAC address of the VIF it was
    seen on.
    """
    networks = {}
    for vif in nw_info:
        label = vif['network']['label']
        network = networks.setdefault(label,
                                      {'ips': [], 'floating_ips': []})
        network['ips'].extend(vif.fixed_ips())
        network['floating_ips'].extend(vif.floating_ips())
        # Stamp every IP accumulated so far for this label with this
        # VIF's MAC address (matches the original accumulation order).
        for ip in itertools.chain(network['ips'],
                                  network['floating_ips']):
            ip['mac_address'] = vif['address']
    return networks
def get_networks_for_instance(context, instance):
    """Returns a prepared nw_info list for passing into the view builders

    We end up with a data structure like::

        {'public': {'ips': [{'address': '10.0.0.1',
                             'version': 4,
                             'mac_address': 'aa:aa:aa:aa:aa:aa'},
                            ...],
                    'floating_ips': [{'address': '172.16.0.1',
                                      'version': 4,
                                      'mac_address': 'aa:aa:aa:aa:aa:aa'},
                                     ...]},
         ...}
    """
    return get_networks_for_instance_from_nw_info(
        compute_utils.get_nw_info_for_instance(instance))
def raise_http_conflict_for_instance_invalid_state(exc, action):
    """Raises a webob.exc.HTTPConflict instance containing a message
    appropriate to return via the API based on the original
    InstanceInvalidState exception.
    """
    kwargs = exc.kwargs
    attr = kwargs.get('attr')
    state = kwargs.get('state')
    if attr and state:
        msg = _("Cannot '%(action)s' while instance is in %(attr)s "
                "%(state)s") % {'action': action, 'attr': attr,
                                'state': state}
    elif kwargs.get('not_launched'):
        msg = _("Cannot '%s' an instance which has never been active") % action
    else:
        # Fall back to a generic but still informative message.
        msg = _("Instance is in an invalid state for '%s'") % action
    raise webob.exc.HTTPConflict(explanation=msg)
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize a <metadata> XML document into a request body dict."""
    def deserialize(self, text):
        # Parse via xmlutil's hardened minidom wrapper, then pull the
        # key/value pairs out of the <metadata> container.
        dom = xmlutil.safe_minidom_parse_string(text)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
    """Deserialize a single <meta> XML item into a request body dict."""
    def deserialize(self, text):
        dom = xmlutil.safe_minidom_parse_string(text)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
    """XML deserializer for metadata create/update request bodies."""
    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request."""
        if metadata_node is None:
            return {}
        metadata = {}
        for meta_node in self.find_children_named(metadata_node, "meta"):
            # Each <meta key="..."> element becomes one dict entry whose
            # value is the element's text content.
            key = meta_node.getAttribute("key")
            metadata[key] = self.extract_text(meta_node)
        return metadata
    def _extract_metadata_container(self, datastring):
        # Shared implementation for create() and update_all(): both take a
        # full <metadata> container document.
        dom = xmlutil.safe_minidom_parse_string(datastring)
        metadata_node = self.find_first_child_named(dom, "metadata")
        metadata = self.extract_metadata(metadata_node)
        return {'body': {'metadata': metadata}}
    def create(self, datastring):
        return self._extract_metadata_container(datastring)
    def update_all(self, datastring):
        return self._extract_metadata_container(datastring)
    def update(self, datastring):
        # Single-item updates carry a bare <meta> element rather than a
        # <metadata> container.
        dom = xmlutil.safe_minidom_parse_string(datastring)
        metadata_item = self.extract_metadata(dom)
        return {'body': {'meta': metadata_item}}
# Default XML namespace map used by the metadata serialization templates.
metadata_nsmap = {None: xmlutil.XMLNS_V11}
class MetaItemTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single metadata item."""
    def construct(self):
        # NOTE(review): the selector appears to pick the first (key, value)
        # item of the 'meta' dict — confirm against xmlutil.Selector /
        # get_items semantics.
        sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
        root = xmlutil.TemplateElement('meta', selector=sel)
        root.set('key', 0)  # 'key' attribute <- item[0]
        root.text = 1       # element text   <- item[1]
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
class MetadataTemplateElement(xmlutil.TemplateElement):
    """Template element that is always rendered."""
    def will_render(self, datum):
        # Always emit the element, even when there is no metadata to
        # serialize.
        return True
class MetadataTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a <metadata> container."""
    def construct(self):
        # MetadataTemplateElement ensures the <metadata> container is
        # emitted even when the metadata dict is empty.
        root = MetadataTemplateElement('metadata', selector='metadata')
        elem = xmlutil.SubTemplateElement(root, 'meta',
                                          selector=xmlutil.get_items)
        elem.set('key', 0)  # 'key' attribute <- item[0]
        elem.text = 1       # element text   <- item[1]
        return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
def check_snapshots_enabled(f):
    """Decorator rejecting requests when instance snapshots are disabled."""
    @functools.wraps(f)
    def inner(*args, **kwargs):
        if CONF.allow_instance_snapshots:
            return f(*args, **kwargs)
        LOG.warn(_LW('Rejecting snapshot request, snapshots currently'
                     ' disabled'))
        msg = _("Instance snapshots are not permitted at this time.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
    return inner
class ViewBuilder(object):
    """Model API responses as dictionaries."""
    def _get_project_id(self, request):
        """Get project id from request url if present or empty string
        otherwise
        """
        project_id = request.environ["nova.context"].project_id
        # Only include the project id when the caller's URL actually
        # contains it (project-less endpoints omit it).
        if project_id in request.url:
            return project_id
        return ''
    def _get_links(self, request, identifier, collection_name):
        # Standard pair of 'self' and 'bookmark' links for a resource.
        return [{
            "rel": "self",
            "href": self._get_href_link(request, identifier, collection_name),
        },
        {
            "rel": "bookmark",
            "href": self._get_bookmark_link(request,
                                            identifier,
                                            collection_name),
        }]
    def _get_next_link(self, request, identifier, collection_name):
        """Return href string with proper limit and marker params."""
        params = request.params.copy()
        params["marker"] = identifier
        prefix = self._update_compute_link_prefix(request.application_url)
        url = os.path.join(prefix,
                           self._get_project_id(request),
                           collection_name)
        return "%s?%s" % (url, dict_to_query_str(params))
    def _get_href_link(self, request, identifier, collection_name):
        """Return an href string pointing to this object."""
        prefix = self._update_compute_link_prefix(request.application_url)
        return os.path.join(prefix,
                            self._get_project_id(request),
                            collection_name,
                            str(identifier))
    def _get_bookmark_link(self, request, identifier, collection_name):
        """Create a URL that refers to a specific resource."""
        # Bookmark links are version-independent, so strip the API version
        # from the base URL first.
        base_url = remove_version_from_href(request.application_url)
        base_url = self._update_compute_link_prefix(base_url)
        return os.path.join(base_url,
                            self._get_project_id(request),
                            collection_name,
                            str(identifier))
    def _get_collection_links(self,
                              request,
                              items,
                              collection_name,
                              id_key="uuid"):
        """Retrieve 'next' link, if applicable. This is included if:
        1) 'limit' param is specified and equals the number of items.
        2) 'limit' param is specified but it exceeds CONF.osapi_max_limit,
        in this case the number of items is CONF.osapi_max_limit.
        3) 'limit' param is NOT specified but the number of items is
        CONF.osapi_max_limit.
        """
        links = []
        # Effective page size: requested limit capped at osapi_max_limit.
        max_items = min(
            int(request.params.get("limit", CONF.osapi_max_limit)),
            CONF.osapi_max_limit)
        if max_items and max_items == len(items):
            last_item = items[-1]
            # Identify the marker for the next page: preferred id_key,
            # then 'id', then 'flavorid' (presumably flavor dicts — TODO
            # confirm with callers).
            if id_key in last_item:
                last_item_id = last_item[id_key]
            elif 'id' in last_item:
                last_item_id = last_item["id"]
            else:
                last_item_id = last_item["flavorid"]
            links.append({
                "rel": "next",
                "href": self._get_next_link(request,
                                            last_item_id,
                                            collection_name),
            })
        return links
    def _update_link_prefix(self, orig_url, prefix):
        """Replace the scheme and netloc of orig_url with prefix's."""
        if not prefix:
            return orig_url
        url_parts = list(urlparse.urlsplit(orig_url))
        prefix_parts = list(urlparse.urlsplit(prefix))
        # Copy only scheme and netloc; path/query/fragment are preserved.
        url_parts[0:2] = prefix_parts[0:2]
        return urlparse.urlunsplit(url_parts)
    def _update_glance_link_prefix(self, orig_url):
        # Rewrite the URL with the configured glance link prefix, if any.
        return self._update_link_prefix(orig_url,
                                        CONF.osapi_glance_link_prefix)
    def _update_compute_link_prefix(self, orig_url):
        # Rewrite the URL with the configured compute link prefix, if any.
        return self._update_link_prefix(orig_url,
                                        CONF.osapi_compute_link_prefix)
def get_instance(compute_api, context, instance_id, want_objects=False,
                 expected_attrs=None):
    """Fetch an instance from the compute API, translating a missing
    instance into an HTTP 404.
    """
    try:
        return compute_api.get(
            context, instance_id,
            want_objects=want_objects, expected_attrs=expected_attrs)
    except exception.InstanceNotFound as not_found:
        raise exc.HTTPNotFound(explanation=not_found.format_message())
def check_cells_enabled(function):
    """Decorator failing with HTTP 501 unless cells support is enabled."""
    @functools.wraps(function)
    def inner(*args, **kwargs):
        if CONF.cells.enable:
            return function(*args, **kwargs)
        msg = _("Cells is not enabled.")
        raise webob.exc.HTTPNotImplemented(explanation=msg)
    return inner
| |
"""A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations when
dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only gzip
and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
__docformat__ = "restructuredtext en"
import os
from shutil import rmtree, copyfile, copyfileobj
_open = open
# Using a class instead of a module-level dictionary
# to reduce the inital 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way that
an instance of `_FileOpeners` itself can be indexed with the keys of that
dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return self._file_openers.keys()
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
    """
    Open `path` with `mode` and return the file object.

    When `path` is a URL it is downloaded into the `destpath` directory
    of a `DataSource` first and opened from that local copy.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path` with: 'r' to read, 'w' to write, 'a' to
        append.  Which modes are available depends on the kind of object
        `path` names.  Default is 'r'.
    destpath : str, optional
        Directory that downloaded source files are stored in.  If None,
        a temporary directory is created.  Defaults to the current
        directory.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    Convenience wrapper, equivalent to
    ``DataSource(destpath).open(path, mode)``.

    """
    return DataSource(destpath).open(path, mode)
class DataSource (object):
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs.  The files may
    also be compressed or uncompressed.  DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/index.html'
        >>> gfile = ds.open('http://www.google.com/index.html') # remote file
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/site/index.html'

        >>> ds = DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/tmpy4pgsP/home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories created when destpath was None.
        if self._istmpdest:
            rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file extension.
        """
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""
        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.

        Returns
        -------
        base, zip_ext : {tuple}
            `zip_ext` includes the leading dot (e.g. ``'.bz2'``) or is
            None for uncompressed files.
        """
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a tuple containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location.  Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        from urlparse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test
        #       for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache.

        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        from urllib2 import urlopen
        from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    copyfileobj(openedurl, f)
                finally:
                    f.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            # BUG FIX: this used to call shutil.copyfile, but only
            # `copyfile` is imported from shutil at module level, so the
            # local-file branch always raised NameError.
            copyfile(path, upath)
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is an URL, _findfile will cache a local copy and return
        the path to the cached file.
        If path is a local file, _findfile will return a path to that local
        file.

        The search will include possible compressed versions of the file and
        return the first occurrence found.

        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.

        """
        # We do this here to reduce the 'import numpy' initial import time.
        from urlparse import urlparse

        # TODO:  This should be more robust.  Handles case where path includes
        #        the destpath, but not other sub-paths. Failing case:
        #        path = /home/guido/datafile.txt
        #        destpath = /home/alex/
        #        upath = self.abspath(path)
        #        upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which
        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        # Iterate until a fixed point: each pass strips leading separators,
        # parent-directory references and (on Windows) drive letters.
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        from urllib2 import urlopen
        from urllib2 import URLError

        # Test local path
        if os.path.exists(path):
            return True

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                del(netfile)
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r'):
        """
        Open and return file-like object.

        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # BUG FIX: _splitzipext returns the extension with its
                # leading dot ('.bz2', not 'bz2'), and str.replace returns
                # a new string -- the old code compared against 'bz2' and
                # discarded the replace() result, so '+' was never removed
                # from the mode passed to BZ2File.
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode)
        else:
            raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository in which several DataSource files share one base
    URL or directory.

    `Repository` extends `DataSource` by prepending a base URL (or local
    directory) to every file name it is given: construct it once with the
    base location, then refer to each file by its name alone.  Use it when
    you will be working with multiple files from one base URL.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where downloaded source files are stored.
        If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository whose files share the base location `baseurl`."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return `path` with the base URL/directory prepended if missing."""
        if len(path.split(self._baseurl, 2)) > 1:
            # `path` already contains the base location.
            return path
        return os.path.join(self._baseurl, path)

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        Behaves like `DataSource.abspath` after prepending the base
        location: for an URL this is where the file exists locally, or
        where it would be stored once opened.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists, prepending the Repository base URL to path.

        The checks are the same as `DataSource.exists`, and in the same
        order: a local file; a remote URL already downloaded into the
        `DataSource` directory; a remote URL that is valid and accessible.
        The file counts as accessible if it exists in either location.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r'):
        """
        Open and return file-like object, prepending the Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.  This may, but does not have
            to, include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        return os.listdir(self._baseurl)
| |
# -*- coding: utf-8 -*-
import unittest
import os
import pickle
import pandas as pd
import numpy as np
from td_query import ROOT_PATH
from td_query.data_manipulate import data_manipulate_instance as instance
from teradata import UdaExec
class TestDataManipulateAPAC(unittest.TestCase):
    """Exploratory driver 'tests' for td_query's Teradata data helpers.

    NOTE(review): every worker method below is prefixed with an underscore,
    so unittest does NOT collect or run it; each appears to be a one-off
    manual driver (presumably renamed by hand to run it) -- TODO confirm.
    They print results instead of asserting and need a live Teradata
    session plus access to the pp_scratch_risk tables.
    """

    @classmethod
    def setUpClass(cls):
        # One-time setup: initialize the shared Teradata session held by
        # the module-level `instance` singleton.
        print("**************************************** setUpClass ****************************************")
        instance.init()
        print(instance.teradata)

    @classmethod
    def tearDownClass(cls):
        print("************************************** tearDownClass ***************************************")

    def setUp(self):
        print("****** setUp *******")

    def tearDown(self):
        print("***** tearDown *****")

    def _example(self):
        # Fetch a sample dataframe; the commented-out lines show how a
        # result was once pickled for offline use.
        df = instance.query_sample()
        # with open(ROOT_PATH + '/external/df_dispatch_bna.pickle', 'wb') as f: # save
        #     pickle.dump(df, f)
        print(df)

    def _calculate(self):
        # Quick percentage breakdown of fixed segment counts over a
        # hard-coded total of 115554 rows.
        def percent(x, y):
            return round(x/y*100, 2)
        total = 115554
        print(
            percent(2877, total),
            percent(3909, total),
            percent(23030, total),
            percent(18840, total),
            percent(66898, total),
        )

    def _query(self):
        # Ad-hoc query smoke check.
        query = '''select top 10 * from pp_scratch_risk.ms_auto_trend_us_bad;'''
        df = instance.query(query)
        print(df)

    def _query_table_schema(self):
        # Teradata 'show select *' returns the DDL of the queried table.
        dest_db = "pp_scratch_risk"
        dest_table = "ms_auto_trend_us2_1_3_100_100_1_1_1"
        result_cursor = instance.teradata.execute("show select * from {}.{};".format(dest_db, dest_table))
        last_row = result_cursor.fetchall()
        print(last_row)

    def _query_table_top_rows(self):
        table = "pp_scratch_risk.ms_auto_trend_us_bad"
        df = instance.query_table_top_rows(table)
        print(df)

    # NOTE(review): 'transalte' in the method names below is a typo for
    # 'translate'; kept as-is because these names are invoked manually.
    # The numeric suffixes appear to encode the weight tuples used in the
    # corresponding hyperloop experiments -- TODO confirm.
    def _transalte_1_1_1_1_1(self):
        rules = [
            "(SELLER_CONSUMER_SEG != 'Y') & (RCVR_CNTRY_CODE == 'C2 ') & (FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5)",
            "(SELLER_CONSUMER_SEG != 'Y') & (RCVR_CNTRY_CODE == 'C2 ') & (FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F < 0.5) & (SNDR_CNTRY_CODE != 'FR ') & (SUB_FLOW != 'MS Subscription') & (SNDR_CNTRY_CODE != 'ES ') & (SNDR_CNTRY_CODE == 'US ') & (SUB_FLOW != 'MS PayPal Cart') & (SUB_FLOW != 'MS Mobile Shopping Cart Upload') & (dof_bin == 'e->1y') & (amt2 != 'a-1k') & (SUB_FLOW == 'MS Single Line Payment') & (SELLER_CONSUMER_SEG == 'C')",
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_100_63_22_14_1(self):
        rules = [
            "(SUB_FLOW != 'MS MassPay') & (IS_ULP_TRANS_T_F >= 0.5) & (SNDR_CNTRY_CODE != 'AU ') & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'JP ') & (SNDR_CNTRY_CODE != 'C2 ') & (RCVR_CNTRY_CODE != 'SG ') & (amt2 != 'e-<50') & (SNDR_CNTRY_CODE != 'HK ') & (dof_bin != 'b-30') & (SUB_FLOW != 'MS Shopping Cart Upload') & (SNDR_CNTRY_CODE != 'ES ') & (SUB_FLOW != 'MS Mobile Shopping Cart Upload')",
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_637_64_22_14_1(self):
        rules = [
            "(SUB_FLOW != 'MS MassPay') & (SNDR_CNTRY_CODE == 'US ') & (FLOW_FAMILY == 'MS FF Website Payments Standard') & (amt2 == 'b-5h') & (RCVR_CNTRY_CODE != 'TH ') & (RCVR_CNTRY_CODE != 'JP ') & (IS_ULP_TRANS_T_F >= 0.5)",
            "(SUB_FLOW != 'MS MassPay') & (SNDR_CNTRY_CODE != 'US ') & (RCVR_CNTRY_CODE == 'C2 ') & (IS_ULP_TRANS_T_F >= 0.5) & (SNDR_CNTRY_CODE != 'ES ') & (amt2 != 'e-<50')",
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_74_16_5_4_1(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (RCVR_CNTRY_CODE != 'VN ') & (SNDR_CNTRY_CODE != 'AU ') & (amt2 != 'e-<50') & (RCVR_CNTRY_CODE != 'NZ ') & (dof_bin != 'b-30') & (RCVR_CNTRY_CODE != 'MY ') & (SNDR_CNTRY_CODE != 'ES ')",
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    # NOTE(review): the following four methods share one identical rule
    # string; only the experiment name differs.
    def _transalte_637_64_11_8_1(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_637_63_22_14_1(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_100_64_11_8_1(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_100_64_22_14_1(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (amt2 == 'b-5h')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_1000_500_100_50_1(self):
        rules = [
            "(SUB_FLOW != 'MS MassPay') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'AU ') & (SNDR_CNTRY_CODE != 'VN ') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'JP ') & (RCVR_CNTRY_CODE != 'SG ') & (SNDR_CNTRY_CODE != 'C2 ') & (dof_bin != 'b-30') & (amt2 == 'b-5h')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_1_85_7_4_5(self):
        rules = [
            "(IS_ULP_TRANS_T_F >= 0.5) & (amt2 == 'b-5h') & (FLOW_FAMILY == 'MS FF Website Payments Standard')",
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_tpv(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (RCVR_CNTRY_CODE != 'VN ') & (SNDR_CNTRY_CODE != 'AU ') & (amt2 != 'e-<50') & (dof_bin != 'b-30') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'MY ') & (SNDR_CNTRY_CODE != 'ES ') & (amt2 != 'c-1h') & (SNDR_CNTRY_CODE != 'GB ')",
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_tpv2(self):
        rules = [
            "(SUB_FLOW != 'MS MassPay') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'AU ') & (RCVR_CNTRY_CODE != 'VN ') & (RCVR_CNTRY_CODE != 'NZ ') & (RCVR_CNTRY_CODE != 'JP ') & (RCVR_CNTRY_CODE != 'SG ') & (amt2 != 'e-<50') & (SNDR_CNTRY_CODE != 'C2 ') & (dof_bin != 'b-30') & (SNDR_CNTRY_CODE != 'HK ') & (amt2 == 'b-5h')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _transalte_mix(self):
        rules = [
            "(FLOW_FAMILY == 'MS FF Website Payments Standard') & (IS_ULP_TRANS_T_F >= 0.5) & (amt2 != 'a-1k') & (SNDR_CNTRY_CODE != 'VN ') & (SNDR_CNTRY_CODE != 'AU ') & (RCVR_CNTRY_CODE != 'NZ ') & (dof_bin != 'b-30') & (amt2 != 'e-<50') & (RCVR_CNTRY_CODE != 'MY ') & (SNDR_CNTRY_CODE != 'ES ') & (SUB_FLOW != 'MS Shopping Cart Upload')"
        ]
        result = instance.translate_hyperloop_rules_to_sql(rules)
        print(result)

    def _duplicate_rows_to_new_table(self):
        src_db = "pp_scratch_risk"
        src_table = 'ms_auto_trend_apac_off_ebay_non_ep_consumer_Day35_trend_month_2017_11_train'
        dest_db = "pp_scratch_risk"
        weight_a = 100
        weight_b = 100
        weight_c = 100
        weight_d = 100
        weight_e = 100
        # NOTE(review): the first dest_table assignment is dead code -- it
        # is immediately overwritten by the line that follows.
        dest_table = "ms_auto_trend_apac_1_3_{}_{}_{}_{}_{}".format(weight_a, weight_b, weight_c, weight_d, weight_e)
        dest_table = "ms_auto_trend_apac_off_ebay_non_ep_consumer_Day35_trend_month_2017_11_train_{}_{}_{}_{}_{}".format(weight_a, weight_b, weight_c, weight_d, weight_e)
        instance.duplicate_rows_to_new_table(src_db, src_table, dest_db, dest_table, weight_a, weight_b, weight_c, weight_d, weight_e)

    def _duplicate_rows_from_bad_and_sample_from_good_into_new_table(self):
        src_db = "pp_scratch_risk"
        src_table = 'ms_auto_trend_apac'
        dest_db = "pp_scratch_risk"
        bad_scale = 1
        good_scale = 3
        weight_a = 1
        weight_b = 85
        weight_c = 7
        weight_d = 4
        weight_e = 3
        dest_table = "ms_auto_trend_apac_{}_{}__{}_{}_{}_{}_{}_v2".format(bad_scale, good_scale, weight_a, weight_b, weight_c, weight_d, weight_e)
        instance.duplicate_rows_from_bad_and_sample_from_good_into_new_table(src_db, src_table, dest_db, dest_table,
                                                                             bad_scale, good_scale,
                                                                             weight_a, weight_b, weight_c, weight_d, weight_e)

    def _generate_hl_job_json(self):
        # Emit a hyperloop job description for the given train/test tables.
        training_table = "ms_auto_trend_apac_1_3__1_85_7_4_3_v2"
        testing_table = "ms_auto_trend_apac_t"
        instance.generate_hl_job_json(training_table, testing_table)

    def _add_weight_col_to_table(self):
        src_db = "pp_scratch_risk"
        src_table = 'ms_auto_trend_apac_1_3'
        # Earlier weight experiments kept for reference:
        # weight_a = 0.312
        # weight_b = 0.140
        # weight_c = 0.011
        # weight_d = 0.011
        # weight_e = 0.001
        weight_a = 10 * 74
        weight_b = 8 * 16
        weight_c = 4.6 * 5
        weight_d = 3.7 * 4
        weight_e = 1 * 1
        instance.add_weight_col_to_table(src_db, src_table, weight_a, weight_b, weight_c, weight_d, weight_e)

    def _update_weight_col_in_table(self):
        src_db = "pp_scratch_risk"
        src_table = 'ms_auto_trend_apac_1_3'
        src_col = 'PMT_USD_AMT'
        instance.update_weight_col_in_table(src_db, src_table, src_col)

    def _update_custom_weight_col_in_table(self):
        src_db = "pp_scratch_risk"
        src_table = 'ms_auto_trend_apac_1_3'
        src_col = 'PMT_USD_AMT'
        instance.update_custom_weight_col_in_table(src_db, src_table, src_col)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import six
from monty.io import zopen
from monty.re import regrep
from collections import defaultdict
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.io_utils import clean_lines
"""
This module implements input and output processing from PWSCF.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "3/27/15"
class PWInput(object):
"""
Base input file class. Right now, only supports no symmetry and is
very basic.
"""
def __init__(self, structure, pseudo=None, control=None, system=None,
electrons=None, ions=None, cell=None, kpoints_mode="automatic",
kpoints_grid=(1, 1, 1),kpoints_shift=(0, 0, 0)):
"""
Initializes a PWSCF input file.
Args:
structure (Structure): Input structure. For spin-polarized calculation,
properties (e.g. {"starting_magnetization": -0.5,
"pseudo": "Mn.pbe-sp-van.UPF"}) on each site is needed instead of
pseudo (dict).
pseudo (dict): A dict of the pseudopotentials to use. Default to None.
control (dict): Control parameters. Refer to official PWSCF doc
on supported parameters. Default to {"calculation": "scf"}
system (dict): System parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
electrons (dict): Electron parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
ions (dict): Ions parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
cell (dict): Cell parameters. Refer to official PWSCF doc
on supported parameters. Default to None, which means {}.
kpoints_mode (str): Kpoints generation mode. Default to automatic.
kpoints_grid (sequence): The kpoint grid. Default to (1, 1, 1).
kpoints_shift (sequence): The shift for the kpoints. Defaults to
(0, 0, 0).
"""
self.structure = structure
sections = {}
sections["control"] = control or {"calculation": "scf"}
sections["system"] = system or {}
sections["electrons"] = electrons or {}
sections["ions"] = ions or {}
sections["cell"] = cell or {}
if pseudo == None:
for site in structure:
try:
site.properties['pseudo']
except KeyError:
raise PWInputError("Missing %s in pseudo specification!"
% site)
else:
for species in self.structure.composition.keys():
if species.symbol not in pseudo:
raise PWInputError("Missing %s in pseudo specification!"
% species.symbol)
self.pseudo = pseudo
self.sections = sections
self.kpoints_mode = kpoints_mode
self.kpoints_grid = kpoints_grid
self.kpoints_shift = kpoints_shift
    def __str__(self):
        """Render the full PWSCF input file (sections, species, positions,
        k-points and cell parameters) as a single string."""
        out = []
        site_descriptions = {}
        if self.pseudo != None:
            # A shared pseudo dict doubles as the species table.
            site_descriptions = self.pseudo
        else:
            # Build one named description per distinct site-properties dict,
            # numbering duplicates of the same element (e.g. Fe1, Fe2).
            c = 1
            for site in self.structure:
                name = None
                for k, v in site_descriptions.items():
                    if site.properties == v:
                        name = k
                if name == None:
                    name = site.specie.symbol+str(c)
                    site_descriptions[name] = site.properties
                    c += 1

        def to_str(v):
            # Format a value in PWSCF conventions: quoted strings, floats
            # with Fortran 'd' exponents, .TRUE./.FALSE. booleans.
            if isinstance(v, six.string_types):
                return "'%s'" % v
            elif isinstance(v, float):
                return "%s" % str(v).replace("e", "d")
            elif isinstance(v, bool):
                if v:
                    return ".TRUE."
                else:
                    return ".FALSE."
            return v

        for k1 in ["control", "system", "electrons", "ions", "cell"]:
            v1 = self.sections[k1]
            out.append("&%s" % k1.upper())
            sub = []
            for k2 in sorted(v1.keys()):
                if isinstance(v1[k2], list):
                    # Per-species values are emitted as key(i) = value,
                    # one entry per species description (1-based index).
                    n = 1
                    for l in v1[k2][:len(site_descriptions)]:
                        sub.append(" %s(%d) = %s" % (k2, n, to_str(v1[k2][n-1])))
                        n += 1
                else:
                    sub.append(" %s = %s" % (k2, to_str(v1[k2])))
            if k1 == "system":
                # Supply required system-section defaults when absent.
                if 'ibrav' not in self.sections[k1]:
                    sub.append(" ibrav = 0")
                if 'nat' not in self.sections[k1]:
                    sub.append(" nat = %d" % len(self.structure))
                if 'ntyp' not in self.sections[k1]:
                    sub.append(" ntyp = %d" % len(site_descriptions))
            sub.append("/")
            out.append(",\n".join(sub))
        out.append("ATOMIC_SPECIES")
        for k, v in sorted(site_descriptions.items(), key=lambda i: i[0]):
            # Element symbol is the leading alphabetic part of the name.
            e = re.match(r"[A-Z][a-z]?", k).group(0)
            if self.pseudo is not None:
                p = v  # v is the pseudopotential file name itself
            else:
                p = v['pseudo']
            out.append(" %s %.4f %s" % (k, Element(e).atomic_mass, p))
        out.append("ATOMIC_POSITIONS crystal")
        if self.pseudo is not None:
            for site in self.structure:
                out.append(" %s %.6f %.6f %.6f" % (site.specie.symbol, site.a,
                                                   site.b, site.c))
        else:
            # Look the site's description name back up by matching its
            # properties dict.
            for site in self.structure:
                name = None
                for k, v in sorted(site_descriptions.items(),
                                   key=lambda i: i[0]):
                    if v == site.properties:
                        name = k
                out.append(" %s %.6f %.6f %.6f" % (name, site.a, site.b, site.c))
        out.append("K_POINTS %s" % self.kpoints_mode)
        kpt_str = ["%s" % i for i in self.kpoints_grid]
        kpt_str.extend(["%s" % i for i in self.kpoints_shift])
        out.append(" %s" % " ".join(kpt_str))
        out.append("CELL_PARAMETERS angstrom")
        for vec in self.structure.lattice.matrix:
            out.append(" %f %f %f" % (vec[0], vec[1], vec[2]))
        return "\n".join(out)
def write_file(self, filename):
"""
Write the PWSCF input file.
Args:
filename (str): The string filename to output to.
"""
with open(filename, "w") as f:
f.write(self.__str__())
@staticmethod
def from_file(filename):
"""
Reads an PWInput object from a file.
Args:
filename (str): Filename for file
Returns:
PWInput object
"""
with zopen(filename, "rt") as f:
return PWInput.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads an PWInput object from a string.
Args:
string (str): PWInput string
Returns:
PWInput object
"""
lines = list(clean_lines(string.splitlines()))
def input_mode(line):
if line[0] == "&":
return ("sections", line[1:].lower())
elif "ATOMIC_SPECIES" in line:
return ("pseudo", )
elif "K_POINTS" in line:
return ("kpoints", line.split("{")[1][:-1])
elif "CELL_PARAMETERS" in line or "ATOMIC_POSITIONS" in line:
return ("structure", line.split("{")[1][:-1])
elif line == "/":
return None
else:
return mode
sections = {"control": {}, "system": {}, "electrons": {},
"ions": {}, "cell":{}}
pseudo = {}
pseudo_index = 0
lattice = []
species = []
coords = []
structure = None
site_properties = {"pseudo":[]}
mode = None
for line in lines:
mode = input_mode(line)
if mode == None:
pass
elif mode[0] == "sections":
section = mode[1]
m = re.match(r'(\w+)\(?(\d*?)\)?\s*=\s*(.*)', line)
if m:
key = m.group(1).strip()
key_ = m.group(2).strip()
val = m.group(3).strip()
if key_ != "":
if sections[section].get(key, None) == None:
val_ = [0.0]*20 # MAX NTYP DEFINITION
val_[int(key_)-1] = PWInput.proc_val(key, val)
sections[section][key] = val_
site_properties[key] = []
else:
sections[section][key][int(key_)-1] = PWInput.proc_val(key, val)
else:
sections[section][key] = PWInput.proc_val(key, val)
elif mode[0] == "pseudo":
m = re.match(r'(\w+)\s+(\d*.\d*)\s+(.*)', line)
if m:
pseudo[m.group(1).strip()] = {}
pseudo[m.group(1).strip()]["index"] = pseudo_index
pseudo[m.group(1).strip()]["pseudopot"] = m.group(3).strip()
pseudo_index += 1
elif mode[0] == "kpoints":
m = re.match(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)', line)
if m:
kpoints_grid = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
kpoints_shift = (int(m.group(4)), int(m.group(5)), int(m.group(6)))
else:
kpoints_mode = mode[1]
elif mode[0] == "structure":
m_l = re.match(r'(-?\d+\.?\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
m_p = re.match(r'(\w+)\s+(-?\d+\.\d*)\s+(-?\d+\.?\d*)\s+(-?\d+\.?\d*)', line)
if m_l:
lattice += [ float(m_l.group(1)), float(m_l.group(2)), float(m_l.group(3)) ]
elif m_p:
site_properties["pseudo"].append(pseudo[m_p.group(1)]["pseudopot"])
species += [pseudo[m_p.group(1)]["pseudopot"].split(".")[0]]
coords += [[float(m_p.group(2)), float(m_p.group(3)), float(m_p.group(4))]]
for k, v in site_properties.items():
if k != "pseudo":
site_properties[k].append(sections['system'][k][pseudo[m_p.group(1)]["index"]])
if mode[1] == "angstrom":
coords_are_cartesian = True
elif mode[1] == "crystal":
coords_are_cartesian = False
structure = Structure(Lattice(lattice), species, coords,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties)
return PWInput(structure=structure, control=sections["control"],
system=sections["system"], electrons=sections["electrons"],
ions=sections["ions"], cell=sections["cell"], kpoints_mode=kpoints_mode,
kpoints_grid=kpoints_grid, kpoints_shift=kpoints_shift)
def proc_val(key, val):
"""
Static helper method to convert PWINPUT parameters to proper type, e.g.,
integers, floats, etc.
Args:
key: PWINPUT parameter key
val: Actual value of PWINPUT parameter.
"""
float_keys = ('etot_conv_thr','forc_conv_thr','conv_thr','Hubbard_U','Hubbard_J0','defauss',
'starting_magnetization',)
int_keys = ('nstep','iprint','nberrycyc','gdir','nppstr','ibrav','nat','ntyp','nbnd','nr1',
'nr2','nr3','nr1s','nr2s','nr3s','nspin','nqx1','nqx2','nqx3','lda_plus_u_kind',
'edir','report','esm_nfit','space_group','origin_choice','electron_maxstep',
'mixing_ndim','mixing_fixed_ns','ortho_para','diago_cg_maxiter','diago_david_ndim',
'nraise','bfgs_ndim','if_pos','nks','nk1','nk2','nk3','sk1','sk2','sk3','nconstr')
bool_keys = ('wf_collect','tstress','tprnfor','lkpoint_dir','tefield','dipfield','lelfield',
'lorbm','lberry','lfcpopt','monopole','nosym','nosym_evc','noinv','no_t_rev',
'force_symmorphic','use_all_frac','one_atom_occupations','starting_spin_angle',
'noncolin','x_gamma_extrapolation','lda_plus_u','lspinorb','london',
'ts_vdw_isolated','xdm','uniqueb','rhombohedral','realxz','block',
'scf_must_converge','adaptive_thr','diago_full_acc','tqr','remove_rigid_rot',
'refold_pos')
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in bool_keys:
if val.lower() == ".true.":
return True
elif val.lower() == ".false.":
return False
else:
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*d?-?\d*", val.lower()).group(0).replace("d", "e"))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
try:
val = val.replace("d","e")
return smart_int_or_float(val)
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
m = re.match(r"^[\"|'](.+)[\"|']$", val)
if m:
return m.group(1)
class PWInputError(Exception):
    """Error raised for malformed or invalid PWSCF input.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers catch it; BaseException is reserved for
    exit-type exceptions such as KeyboardInterrupt and SystemExit.
    """
class PWOutput(object):
    """
    Lightweight parser for PWSCF (pw.x) output files.

    On construction, the file is scanned once with the regexes in
    ``PWOutput.patterns`` and the captured values are stored, converted to
    numeric types, in ``self.data``.
    """
    patterns = {
        "energies": r'total energy\s+=\s+([\d\.\-]+)\sRy',
        "ecut": r'kinetic\-energy cutoff\s+=\s+([\d\.\-]+)\s+Ry',
        "lattice_type": r'bravais\-lattice index\s+=\s+(\d+)',
        "celldm1": r"celldm\(1\)=\s+([\d\.]+)\s",
        "celldm2": r"celldm\(2\)=\s+([\d\.]+)\s",
        "celldm3": r"celldm\(3\)=\s+([\d\.]+)\s",
        "celldm4": r"celldm\(4\)=\s+([\d\.]+)\s",
        "celldm5": r"celldm\(5\)=\s+([\d\.]+)\s",
        "celldm6": r"celldm\(6\)=\s+([\d\.]+)\s",
        "nkpts": r"number of k points=\s+([\d]+)"
    }

    def __init__(self, filename):
        """
        Args:
            filename (str): Path of the PWSCF output file to parse.
        """
        self.filename = filename
        self.data = defaultdict(list)
        self.read_pattern(PWOutput.patterns)
        # Convert the raw regex captures to proper numeric types.
        # "energies" keeps every match (one per SCF iteration); all other
        # keys keep only the first occurrence in the file.
        for key, matches in self.data.items():
            if key == "energies":
                self.data[key] = [float(match[0][0]) for match in matches]
            elif key in ("lattice_type", "nkpts"):
                self.data[key] = int(matches[0][0][0])
            else:
                self.data[key] = float(matches[0][0][0])

    def read_pattern(self, patterns, reverse=False,
                     terminate_on_match=False, postprocess=str):
        """
        General pattern reading. Uses monty's regrep method and takes the
        same arguments.

        Args:
            patterns (dict): A dict of patterns, e.g.,
                {"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
            reverse (bool): Read files in reverse. Defaults to false. Useful
                for large files, especially when used with
                terminate_on_match.
            terminate_on_match (bool): Whether to terminate when there is at
                least one match in each key in pattern.
            postprocess (callable): A post processing function applied to all
                matches. Defaults to str, i.e., no change.

        Renders accessible:
            Any attribute in patterns. For example,
            {"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set
            self.data["energy"] = [[-1234], [-3453], ...] from the regex and
            postprocess. The values are lists of lists because multiple items
            can be grepped from one line.
        """
        matches = regrep(self.filename, patterns, reverse=reverse,
                         terminate_on_match=terminate_on_match,
                         postprocess=postprocess)
        self.data.update(matches)

    def get_celldm(self, i):
        """Return celldm(i), i in 1..6, as parsed from the output."""
        key = "celldm%d" % i
        return self.data[key]

    @property
    def final_energy(self):
        """The last total energy in the file, i.e. the converged value (Ry)."""
        return self.data["energies"][-1]

    @property
    def lattice_type(self):
        """The bravais-lattice index reported by PWSCF."""
        return self.data["lattice_type"]
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Iterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat as forward_compat
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testNoGradients(self):
component = constant_op.constant([1.])
side = constant_op.constant(0.)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset_ops.make_one_shot_iterator(dataset).get_next()
self.assertIsNone(gradients_impl.gradients(value, component)[0])
self.assertIsNone(gradients_impl.gradients(value, side)[0])
self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testCapturingStateInOneShotRaisesException(self):
var = variables.Variable(37.0, name="myvar")
dataset = (
dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
.map(lambda x: x + var))
with self.assertRaisesRegexp(
ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
"datasets that capture stateful objects.+myvar"):
dataset_ops.make_one_shot_iterator(dataset)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
  @combinations.generate(test_base.default_test_combinations())
  def testOneShotIteratorInsideContainer(self):
    """One-shot iterators created inside distinct resource containers must
    not share state across sessions on the same server."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    def within_container():
      # Builds the iterator while a specific container is active, so its
      # resource is registered in that container.
      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)
      iterator = dataset_ops.make_one_shot_iterator(
          dataset_ops.Dataset.from_tensor_slices(components)
          .map(_map_fn).repeat(14))
      return iterator.get_next()
    server = server_lib.Server.create_local_server()
    # Create two iterators within unique containers, and run them to
    # make sure that the resources aren't shared.
    #
    # The test below would fail if cname were the same across both
    # sessions.
    for j in range(2):
      with session.Session(server.target) as sess:
        cname = "iteration%d" % j
        with ops.container(cname):
          get_next = within_container()
        # Each fresh container gets a fresh iterator: all 14 epochs of 7
        # elements are observed, then OutOfRange.
        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testOneShotIteratorNonBlocking(self):
    """One-shot iterator initialization must not deadlock, even with a
    single inter-op thread or many concurrent callers."""
    dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    next_element = iterator.get_next()
    # Create a session with a single thread to ensure that the
    # one-shot iterator initializer does not deadlock.
    config = config_pb2.ConfigProto(
        inter_op_parallelism_threads=1, use_per_session_threads=True)
    with session.Session(config=config) as sess:
      self.assertAllEqual([1, 4, 9], sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
    # Test with multiple threads invoking the one-shot iterator concurrently.
    with session.Session(config=config) as sess:
      results = []
      def consumer_thread():
        # Exactly one thread should win the race for the single element;
        # the rest record None on OutOfRange.
        try:
          results.append(sess.run(next_element))
        except errors.OutOfRangeError:
          results.append(None)
      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      self.assertEqual(num_threads, len(results))
      self.assertEqual(num_threads - 1,
                       len([None for r in results if r is None]))
      self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testOneShotIteratorInitializerFails(self):
    """A failed one-shot iterator initialization surfaces the same error on
    every subsequent use, including from concurrent threads."""
    # Define a dataset whose initialization will always fail.
    # (gather([0], [4]) is an out-of-bounds index.)
    dataset = dataset_ops.Dataset.from_tensors(array_ops.gather([0], [4]))
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    next_element = iterator.get_next()
    with self.cached_session() as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
        sess.run(next_element)
      # Test that subsequent attempts to use the iterator also fail.
      with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
        sess.run(next_element)
    with self.cached_session() as sess:
      def consumer_thread():
        # Every concurrent consumer observes the initialization failure.
        with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
          sess.run(next_element)
      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testSimpleSharedResource(self):
    """An initializable iterator with a shared_name is visible, with its
    state, from a second session/graph on the same server."""
    components = (np.array(1, dtype=np.int64),
                  np.array([1, 2, 3], dtype=np.int64),
                  np.array(37.0, dtype=np.float64))
    server = server_lib.Server.create_local_server()
    # Create two non-overlapping sessions that share the same iterator
    # resource on the same server, and verify that an action of the
    # first session (initializing the iterator) is visible in the
    # second session.
    with ops.Graph().as_default():
      iterator = dataset_ops.make_initializable_iterator(
          dataset_ops.Dataset.from_tensors(
              components).map(lambda x, y, z: (x, y, z)),
          shared_name="shared_iterator")
      init_op = iterator.initializer
      get_next = iterator.get_next()
      with session.Session(server.target) as sess:
        sess.run(init_op)
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
        # Re-initialize the iterator in the first session.
        sess.run(init_op)
    with ops.Graph().as_default():
      # Re-define the iterator manually, without defining any of the
      # functions in this graph, to ensure that we are not
      # accidentally redefining functions with the same names in the
      # new graph.
      iterator = iterator_ops.Iterator.from_structure(
          shared_name="shared_iterator",
          output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
          output_shapes=([], [3], []))
      get_next = iterator.get_next()
      with session.Session(server.target) as sess:
        # Use the iterator without re-initializing in the second session.
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(components))
get_next = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(dataset_3), [None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(
dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(
dataset_ops.get_legacy_output_types(dataset_4),
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(
[None], dataset_ops.get_legacy_output_shapes(iterator).as_list())
with self.cached_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testReinitializableIteratorWithFunctions(self):
def g():
for i in range(10):
yield i
iterator = iterator_ops.Iterator.from_structure(dtypes.int64, [])
next_element = iterator.get_next()
with self.cached_session() as sess:
dataset_1 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_1))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
dataset_2 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_2))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
@combinations.generate(test_base.default_test_combinations())
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int32),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int64),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset_3),
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testIteratorStringHandleFuture(self):
with forward_compat.forward_compatibility_horizon(2018, 8, 4):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset_3),
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testIteratorStringHandleReuseTensorObject(self):
    """string_handle() without a name memoizes and returns the same Tensor
    object; an explicit name always creates a new op."""
    dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
    one_shot_iterator = dataset_ops.make_one_shot_iterator(dataset)
    initializable_iterator = dataset_ops.make_initializable_iterator(dataset)
    structure_iterator = iterator_ops.Iterator.from_structure(
        dataset_ops.get_legacy_output_types(dataset))
    # Op count snapshot; the default string handles were already created as
    # part of iterator construction above.
    created_ops = len(ops.get_default_graph().get_operations())
    self.assertIs(one_shot_iterator.string_handle(),
                  one_shot_iterator.string_handle())
    self.assertIs(initializable_iterator.string_handle(),
                  initializable_iterator.string_handle())
    self.assertIs(structure_iterator.string_handle(),
                  structure_iterator.string_handle())
    # Assert that getting the (default) string handle creates no ops.
    self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
    # Specifying an explicit name will create a new op.
    handle_with_name = one_shot_iterator.string_handle(name="foo")
    self.assertEqual("foo", handle_with_name.op.name)
    self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
    # Reusing the name yields a uniquified op name ("foo_1"), i.e. another
    # distinct op, not the memoized handle.
    handle_with_same_name = one_shot_iterator.string_handle(name="foo")
    self.assertEqual("foo_1", handle_with_same_name.op.name)
    self.assertIsNot(handle_with_name, handle_with_same_name)
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testIteratorStringHandleError(self):
dataset_int_scalar = (
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.cached_session() as sess:
handle_int_scalar = sess.run(dataset_ops.make_one_shot_iterator(
dataset_int_scalar).string_handle())
handle_float_vector = sess.run(dataset_ops.make_one_shot_iterator(
dataset_float_vector).string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
    """An iterator living on one device can be advanced from another via
    remote_call on its string handle; targeting a device that does not own
    the resource fails."""
    worker_config = config_pb2.ConfigProto()
    worker_config.device_count["CPU"] = 3
    # The iterator resource lives on cpu:1.
    with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
      iterator_3_handle = iterator_3.string_handle()
    @function.Defun(dtypes.string)
    def _remote_fn(h):
      # Executed on the remote_call target: reconstitute the iterator from
      # its handle and advance it.
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, dataset_ops.get_legacy_output_types(dataset_3),
          dataset_ops.get_legacy_output_shapes(dataset_3))
      return remote_iterator.get_next()
    with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
      target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      remote_op = functional_ops.remote_call(
          args=[iterator_3_handle],
          Tout=[dtypes.int32],
          f=_remote_fn,
          target=target_placeholder)
    with self.session(config=worker_config) as sess:
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [1])
      # Fails when target is cpu:2 where the resource is not located.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
            })
      # The failed call did not consume an element; iteration resumes at 2.
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [2])
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
          })
      self.assertEqual(elem, [3])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
            })
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
    """Per-worker iterators are consumed from a separate client job by
    zipping each worker's device name with its iterator handle and issuing
    a remote_call per element."""
    s1 = server_lib.Server.create_local_server()
    s2 = server_lib.Server.create_local_server()
    s3 = server_lib.Server.create_local_server()
    cluster_def = cluster_pb2.ClusterDef()
    workers = cluster_def.job.add()
    workers.name = "worker"
    workers.tasks[0] = s1.target[len("grpc://"):]
    workers.tasks[1] = s2.target[len("grpc://"):]
    client = cluster_def.job.add()
    client.name = "client"
    client.tasks[0] = s3.target[len("grpc://"):]
    config = config_pb2.ConfigProto(cluster_def=cluster_def)
    worker_devices = [
        "/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
    ]
    itr_handles = []
    # Each worker hosts a one-element dataset containing its own device name.
    for device in worker_devices:
      with ops.device(device):
        src = dataset_ops.Dataset.from_tensor_slices([device])
        itr = dataset_ops.make_one_shot_iterator(src)
        itr_handles.append(itr.string_handle())
    targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
    handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)
    @function.Defun(dtypes.string)
    def loading_func(h):
      # Runs on whichever worker owns the handle.  `itr` here is the last
      # iterator created in the loop above; only its (identical for both
      # workers) output types/shapes are used.
      remote_itr = iterator_ops.Iterator.from_string_handle(
          h, dataset_ops.get_legacy_output_types(itr),
          dataset_ops.get_legacy_output_shapes(itr))
      return remote_itr.get_next()
    def map_fn(target, handle):
      return functional_ops.remote_call(
          args=[handle], Tout=[dtypes.string], f=loading_func, target=target)
    with ops.device("/job:client"):
      client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
      itr = dataset_ops.make_initializable_iterator(client_dataset)
      n = itr.get_next()
    with session.Session(s3.target, config=config) as sess:
      sess.run(itr.initializer)
      # Each element is the device name reported back by the owning worker.
      expected_values = worker_devices
      for expected in expected_values:
        self.assertEqual((compat.as_bytes(expected),), sess.run(n))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(n)
  @combinations.generate(
      combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
  def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
    """An iterator string handle can be shipped to a GPU as a uint8 tensor
    (GPUs cannot host string tensors) and decoded back inside the remote
    function."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
      dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
      iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
      iterator_3_handle = iterator_3.string_handle()
    def _encode_raw(byte_array):
      # Inverse of decode_raw: reassemble the uint8 array into bytes.
      return bytes(bytearray(byte_array))
    @function.Defun(dtypes.uint8)
    def _remote_fn(h):
      # Rebuild the string handle from its uint8 encoding, then advance the
      # iterator it refers to.
      handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          handle, dataset_ops.get_legacy_output_types(dataset_3),
          dataset_ops.get_legacy_output_shapes(dataset_3))
      return remote_iterator.get_next()
    with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
      target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
      iterator_3_handle_uint8 = parsing_ops.decode_raw(
          input_bytes=iterator_3_handle, out_type=dtypes.uint8)
      remote_op = functional_ops.remote_call(
          args=[iterator_3_handle_uint8],
          Tout=[dtypes.int32],
          f=_remote_fn,
          target=target_placeholder)
    with self.cached_session() as sess:
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
          })
      self.assertEqual(elem, [1])
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
          })
      self.assertEqual(elem, [2])
      elem = sess.run(
          remote_op,
          feed_dict={
              target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
          })
      self.assertEqual(elem, [3])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(
            remote_op,
            feed_dict={
                target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
            })
@combinations.generate(
combinations.combine(tf_api_version=[1, 2], mode=["graph"]))
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.make_one_shot_iterator(dataset_ops.Dataset.range(10))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertIn(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
expected_element_structure=tensor_spec.TensorSpec([],
dtypes.float32),
expected_output_classes=ops.Tensor,
expected_output_types=dtypes.float32,
expected_output_shapes=[[]])))
def testTensorIteratorStructure(self, expected_element_structure,
expected_output_classes,
expected_output_types,
expected_output_shapes):
tf_value_fn = lambda: constant_op.constant(37.0)
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
expected_element_structure=sparse_tensor.SparseTensorSpec(
[1], dtypes.int32),
expected_output_classes=sparse_tensor.SparseTensor,
expected_output_types=dtypes.int32,
expected_output_shapes=[[1]])))
def testSparseTensorIteratorStructure(self, expected_element_structure,
expected_output_classes,
expected_output_types,
expected_output_shapes):
def tf_value_fn():
return sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1])
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
expected_element_structure={
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string))
},
expected_output_classes={
"a": ops.Tensor,
"b": (ops.Tensor, ops.Tensor)
},
expected_output_types={
"a": dtypes.float32,
"b": (dtypes.string, dtypes.string)
},
expected_output_shapes={
"a": [],
"b": ([1], [])
})))
def testNestedTensorIteratorStructure(self, expected_element_structure,
expected_output_classes,
expected_output_types,
expected_output_shapes):
def tf_value_fn():
return {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
@combinations.generate(test_base.default_test_combinations())
def testIteratorGetNextName(self):
with ops.Graph().as_default():
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(37.0))
next_element = iterator.get_next(name="overridden_name")
self.assertEqual("overridden_name", next_element.op.name)
@combinations.generate(
combinations.combine(
tf_api_version=[1, 2],
mode="eager",
execution_mode=[context.ASYNC, context.SYNC]))
def testIteratorEagerIteration(self, execution_mode):
with context.eager_mode(), context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for foo in iterator:
self.assertEqual(val, foo.numpy())
val += 1
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testOwnedIteratorFunction(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for _ in range(10):
queue.enqueue(next(iterator))
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testOwnedIteratorFunctionError(self):
    """Verifies iterator resource finalization when a tf.function raises."""
    # In this test we verify that a function that raises an error ends up
    # properly deallocating the iterator resource.
    queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
    queue.enqueue(0)
    def init_fn(n):
      return n
    def next_fn(_):
      # An empty range dataset raises OutOfRangeError on the first next().
      ds = dataset_ops.Dataset.range(0)
      return next(iter(ds))
    def finalize_fn(n):
      # Enqueue a marker so the test can observe that finalization ran.
      queue.enqueue(0)
      return n
    @def_function.function
    def fn():
      dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn)
      iterator = iter(dataset)
      next(iterator)
    with self.assertRaises(errors.OutOfRangeError):
      fn()
    # Size 2 == the initial enqueue plus the finalize_fn enqueue, which
    # proves the iterator resource was deallocated despite the error.
    self.assertEqual(queue.size().numpy(), 2)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for elem in iterator:
counter += elem
return counter
dataset = dataset_ops.Dataset.range(5)
dataset2 = dataset_ops.Dataset.range(10)
for _ in range(10):
self.assertEqual(self.evaluate(f(iter(dataset))), 10)
self.assertEqual(self.evaluate(f(iter(dataset2))), 45)
self.assertEqual(trace_count[0], 1)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testNestedFunctionsIteratorResource(self):
@def_function.function
def sum_dataset(ds):
it = iter(ds)
@def_function.function
def next_element(it):
return next(it)
total = 0
for _ in range(10):
total += next_element(it)
return total
ds = dataset_ops.Dataset.range(10)
self.assertEqual(sum_dataset(ds).numpy(), 45)
self.assertEqual(sum_dataset(ds).numpy(), 45)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layer wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class TimeDistributedTest(test.TestCase):
  """Tests for the keras.layers.TimeDistributed wrapper."""
  def test_timedistributed_dense(self):
    """TimeDistributed wraps a Dense layer and trains end-to-end."""
    # first, test with Dense layer
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4)))
      model.compile(optimizer='rmsprop', loss='mse')
      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 2)),
          epochs=1,
          batch_size=10)
      # test config
      model.get_config()
  def test_timedistributed_static_batch_size(self):
    """Wrapper works when a static batch_size is given at construction."""
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4), batch_size=10))
      model.compile(optimizer='rmsprop', loss='mse')
      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 2)),
          epochs=1,
          batch_size=10)
  def test_timedistributed_conv2d(self):
    """Wrapper applies a Conv2D layer across the leading time dimension."""
    # test with Conv2D
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Conv2D(5, (2, 2), padding='same'),
              input_shape=(2, 4, 4, 3)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))
      # Round-trip through JSON serialization to verify config correctness.
      model = keras.models.model_from_json(model.to_json())
      model.summary()
  def test_timedistributed_stacked(self):
    """Stacked TimeDistributed layers compose and train."""
    # test stacked layers
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2), input_shape=(3, 4)))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      model.fit(
          np.random.random((10, 3, 4)),
          np.random.random((10, 3, 3)),
          epochs=1,
          batch_size=10)
  def test_regularizers(self):
    """Regularizer losses on the wrapped layer propagate to the model."""
    with self.test_session():
      model = keras.models.Sequential()
      model.add(
          keras.layers.TimeDistributed(
              keras.layers.Dense(2, kernel_regularizer='l1'),
              input_shape=(3, 4)))
      model.add(keras.layers.Activation('relu'))
      model.compile(optimizer='rmsprop', loss='mse')
      self.assertEqual(len(model.losses), 1)
  def test_TimeDistributed_learning_phase(self):
    """Wrapped layers honor the training flag passed through the wrapper."""
    with self.test_session():
      # test layers that need learning_phase to be set
      np.random.seed(1234)
      x = keras.layers.Input(shape=(3, 2))
      y = keras.layers.TimeDistributed(
          keras.layers.Dropout(.999))(x, training=True)
      model = keras.models.Model(x, y)
      y = model.predict(np.random.random((10, 3, 2)))
      # With rate .999 and training=True nearly all activations drop out,
      # so the output mean should be close to 0.
      self.assertAllClose(np.mean(y), 0., atol=1e-1, rtol=1e-1)
  def test_TimeDistributed_batchnorm(self):
    """BatchNormalization update ops still run inside the wrapper."""
    with self.test_session():
      # test that wrapped BN updates still work.
      model = keras.models.Sequential()
      model.add(keras.layers.TimeDistributed(
          keras.layers.BatchNormalization(center=True, scale=True),
          name='bn',
          input_shape=(10, 2)))
      model.compile(optimizer='rmsprop', loss='mse')
      # Assert that mean and variance are 0 and 1.
      td = model.layers[0]
      self.assertAllClose(td.get_weights()[2], np.array([0, 0]))
      assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
      # Train
      model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                           np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
      # Assert that mean and variance changed.
      assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
      assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
      # Verify input_map has one mapping from inputs to reshaped inputs.
      self.assertEqual(len(td._input_map.keys()), 1)
  def test_TimeDistributed_trainable(self):
    """Toggling `trainable` adds/removes updates and trainable weights."""
    # test layers that need learning_phase to be set
    x = keras.layers.Input(shape=(3, 2))
    layer = keras.layers.TimeDistributed(keras.layers.BatchNormalization())
    _ = layer(x)
    assert len(layer.updates) == 2
    assert len(layer.trainable_weights) == 2
    layer.trainable = False
    assert not layer.updates
    assert not layer.trainable_weights
    layer.trainable = True
    assert len(layer.updates) == 2
    assert len(layer.trainable_weights) == 2
class BidirectionalTest(test.TestCase):
  """Tests for the keras.layers.Bidirectional RNN wrapper."""
  def test_bidirectional(self):
    """Bidirectional trains under every supported merge mode."""
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    with self.test_session():
      for mode in ['sum', 'concat', 'ave', 'mul']:
        x = np.random.random((samples, timesteps, dim))
        # 'concat' doubles the feature dimension; the other modes keep it.
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))
        # test with Sequential model
        model = keras.models.Sequential()
        model.add(
            keras.layers.Bidirectional(
                rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)
        # test compute output shape
        ref_shape = model.layers[-1].output.get_shape()
        shape = model.layers[-1].compute_output_shape(
            (None, timesteps, dim))
        self.assertListEqual(shape.as_list(), ref_shape.as_list())
        # test config
        model.get_config()
        model = keras.models.model_from_json(model.to_json())
        model.summary()
  def test_bidirectional_weight_loading(self):
    """Round-tripping weights through get/set_weights keeps outputs equal."""
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    with self.test_session():
      x = np.random.random((samples, timesteps, dim))
      model = keras.models.Sequential()
      model.add(
          keras.layers.Bidirectional(
              rnn(output_dim), input_shape=(timesteps, dim)))
      y_ref = model.predict(x)
      weights = model.layers[-1].get_weights()
      model.layers[-1].set_weights(weights)
      y = model.predict(x)
      self.assertAllClose(y, y_ref)
  def test_bidirectional_stacked(self):
    """Stacked Bidirectional layers work in Sequential and functional APIs."""
    # test stacked bidirectional layers
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    mode = 'sum'
    with self.test_session():
      x = np.random.random((samples, timesteps, dim))
      target_dim = 2 * output_dim if mode == 'concat' else output_dim
      y = np.random.random((samples, target_dim))
      model = keras.models.Sequential()
      model.add(
          keras.layers.Bidirectional(
              rnn(output_dim, return_sequences=True),
              merge_mode=mode,
              input_shape=(timesteps, dim)))
      model.add(keras.layers.Bidirectional(rnn(output_dim), merge_mode=mode))
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)
      # test with functional API
      inputs = keras.layers.Input((timesteps, dim))
      output = keras.layers.Bidirectional(
          rnn(output_dim), merge_mode=mode)(inputs)
      model = keras.models.Model(inputs, output)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)
  def test_bidirectional_statefulness(self):
    """Bidirectional supports stateful wrapped RNNs."""
    # Bidirectional and stateful
    rnn = keras.layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    mode = 'sum'
    with self.test_session():
      x = np.random.random((samples, timesteps, dim))
      target_dim = 2 * output_dim if mode == 'concat' else output_dim
      y = np.random.random((samples, target_dim))
      inputs = keras.layers.Input(batch_shape=(1, timesteps, dim))
      output = keras.layers.Bidirectional(
          rnn(output_dim, stateful=True), merge_mode=mode)(inputs)
      model = keras.models.Model(inputs, output)
      model.compile(loss='mse', optimizer='sgd')
      model.fit(x, y, epochs=1, batch_size=1)
  def test_Bidirectional_merged_value(self):
    """Merged output equals merging forward and reversed backward outputs."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    x = [np.random.rand(samples, timesteps, dim)]
    with self.test_session():
      for merge_mode in ['sum', 'mul', 'ave', 'concat', None]:
        if merge_mode == 'sum':
          merge_func = lambda y, y_rev: y + y_rev
        elif merge_mode == 'mul':
          merge_func = lambda y, y_rev: y * y_rev
        elif merge_mode == 'ave':
          merge_func = lambda y, y_rev: (y + y_rev) / 2
        elif merge_mode == 'concat':
          merge_func = lambda y, y_rev: np.concatenate((y, y_rev), axis=-1)
        else:
          merge_func = lambda y, y_rev: [y, y_rev]
        # basic case
        inputs = keras.Input((timesteps, dim))
        layer = keras.layers.Bidirectional(
            rnn(units, return_sequences=True), merge_mode=merge_mode)
        f_merged = keras.backend.function([inputs], _to_list(layer(inputs)))
        f_forward = keras.backend.function([inputs],
                                           [layer.forward_layer.call(inputs)])
        # The backward layer's output is reversed along time (axis 1)
        # before comparing against the merged output.
        f_backward = keras.backend.function(
            [inputs],
            [keras.backend.reverse(layer.backward_layer.call(inputs), 1)])
        y_merged = f_merged(x)
        y_expected = _to_list(merge_func(f_forward(x)[0], f_backward(x)[0]))
        assert len(y_merged) == len(y_expected)
        for x1, x2 in zip(y_merged, y_expected):
          self.assertAllClose(x1, x2, atol=1e-5)
        # test return_state
        inputs = keras.Input((timesteps, dim))
        layer = keras.layers.Bidirectional(
            rnn(units, return_state=True), merge_mode=merge_mode)
        f_merged = keras.backend.function([inputs], layer(inputs))
        f_forward = keras.backend.function([inputs],
                                           layer.forward_layer.call(inputs))
        f_backward = keras.backend.function([inputs],
                                            layer.backward_layer.call(inputs))
        n_states = len(layer.layer.states)
        y_merged = f_merged(x)
        y_forward = f_forward(x)
        y_backward = f_backward(x)
        y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
        # Outputs are followed by the forward then backward states.
        assert len(y_merged) == len(y_expected) + n_states * 2
        for x1, x2 in zip(y_merged, y_expected):
          self.assertAllClose(x1, x2, atol=1e-5)
        y_merged = y_merged[-n_states * 2:]
        y_forward = y_forward[-n_states:]
        y_backward = y_backward[-n_states:]
        for state_birnn, state_inner in zip(y_merged, y_forward + y_backward):
          self.assertAllClose(state_birnn, state_inner, atol=1e-5)
  def test_Bidirectional_dropout(self):
    """Dropout marks outputs as learning-phase dependent as expected."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    merge_mode = 'sum'
    x = [np.random.rand(samples, timesteps, dim)]
    with self.test_session():
      inputs = keras.Input((timesteps, dim))
      wrapped = keras.layers.Bidirectional(
          rnn(units, dropout=0.2, recurrent_dropout=0.2), merge_mode=merge_mode)
      # With an explicit training flag, outputs do not depend on the
      # global learning phase.
      outputs = _to_list(wrapped(inputs, training=True))
      assert all(not getattr(x, '_uses_learning_phase') for x in outputs)
      inputs = keras.Input((timesteps, dim))
      wrapped = keras.layers.Bidirectional(
          rnn(units, dropout=0.2, return_state=True), merge_mode=merge_mode)
      outputs = _to_list(wrapped(inputs))
      assert all(x._uses_learning_phase for x in outputs)
      model = keras.Model(inputs, outputs)
      assert model.uses_learning_phase
      # With the learning phase unset, repeated predictions are deterministic.
      y1 = _to_list(model.predict(x))
      y2 = _to_list(model.predict(x))
      for x1, x2 in zip(y1, y2):
        self.assertAllClose(x1, x2, atol=1e-5)
  def test_Bidirectional_state_reuse(self):
    """States from one Bidirectional layer can seed another's initial_state."""
    rnn = keras.layers.LSTM
    samples = 2
    dim = 5
    timesteps = 3
    units = 3
    with self.test_session():
      input1 = keras.layers.Input((timesteps, dim))
      layer = keras.layers.Bidirectional(
          rnn(units, return_state=True, return_sequences=True))
      state = layer(input1)[1:]
      # test passing invalid initial_state: passing a tensor
      input2 = keras.layers.Input((timesteps, dim))
      with self.assertRaises(ValueError):
        output = keras.layers.Bidirectional(
            rnn(units))(input2, initial_state=state[0])
      # test valid usage: passing a list
      output = keras.layers.Bidirectional(rnn(units))(input2,
                                                      initial_state=state)
      model = keras.models.Model([input1, input2], output)
      assert len(model.layers) == 4
      assert isinstance(model.layers[-1].input, list)
      inputs = [np.random.rand(samples, timesteps, dim),
                np.random.rand(samples, timesteps, dim)]
      model.predict(inputs)
  def test_Bidirectional_trainable(self):
    """Toggling `trainable` exposes/hides the wrapped layers' weights."""
    # test layers that need learning_phase to be set
    with self.test_session():
      x = keras.layers.Input(shape=(3, 2))
      layer = keras.layers.Bidirectional(keras.layers.SimpleRNN(3))
      _ = layer(x)
      assert len(layer.trainable_weights) == 6
      layer.trainable = False
      assert not layer.trainable_weights
      layer.trainable = True
      assert len(layer.trainable_weights) == 6
def _to_list(ls):
if isinstance(ls, list):
return ls
else:
return [ls]
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import os
import re
import functools
import numpy as np
# Optional dependencies: xarray and torch interop is enabled only when the
# corresponding package is importable; the flags gate all related code paths.
_HAVE_XARRAY = False
try:
    import xarray as xr
    _HAVE_XARRAY = True
except ImportError:
    pass
_HAVE_TORCH = False
try:
    import torch
    _HAVE_TORCH = True
except ImportError:
    pass
def camel_to_snake_case(name):
    """Convert a CamelCase identifier to snake_case.

    An underscore is inserted before each capitalized word and before any
    capital following a lowercase letter or digit; doubled underscores
    produced by the substitutions are collapsed and the result lowercased.
    """
    partial = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)
    partial = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", partial)
    return partial.replace("__", "_").lower()
def is_arraylike(arr):
    """Return True when `arr` duck-types as a NumPy-style array.

    The check requires the `shape`, `dtype`, `__array__` and `ndim`
    attributes; plain Python sequences therefore do not qualify.
    """
    required = ("shape", "dtype", "__array__", "ndim")
    return all(hasattr(arr, attribute) for attribute in required)
def move_first_dimension_to_last(arr):
    """Return a contiguous copy with the trailing axis relocated to the front.

    NOTE(review): despite the name, the moveaxis permutation used here maps
    shape (d0, ..., dn-1, c) to (c, d0, ..., dn-1) -- i.e. it moves the
    *last* axis to the front. Its companion performs the inverse and the two
    are used as a matched pair at the call sites, so the observable behavior
    is preserved verbatim.
    """
    import numpy as np

    return np.moveaxis(arr, -1, 0).copy()
def move_last_dimension_to_first(arr):
    """Return a contiguous copy with the leading axis relocated to the end.

    NOTE(review): despite the name, the moveaxis permutation used here maps
    shape (c, d0, ..., dn-1) to (d0, ..., dn-1, c) -- i.e. it moves the
    *first* axis to the end (e.g. channel-first to interleaved). It is the
    inverse of move_first_dimension_to_last and is used as its pair at the
    call sites, so the observable behavior is preserved verbatim.
    """
    import numpy as np

    return np.moveaxis(arr, 0, -1).copy()
def accept_array_like_xarray_torch(image_filter):
    """Decorator that allows itk.ProcessObject snake_case functions to accept
    NumPy array-like, PyTorch Tensor's or xarray DataArray inputs for itk.Image inputs.
    If a NumPy array-like is passed as an input, output itk.Image's are converted to numpy.ndarray's.
    If a torch.Tensor is passed as an input, output itk.Image's are converted to torch.Tensors.
    If a xarray DataArray is passed as an input, output itk.Image's are converted to xarray.DataArray's."""
    import numpy as np
    import itk

    @functools.wraps(image_filter)
    def image_filter_wrapper(*args, **kwargs):
        # Track which wrapper type was seen so outputs can be converted back
        # to the same representation. xarray takes precedence over torch,
        # which takes precedence over plain array-likes (see output branch).
        have_array_input = False
        have_xarray_input = False
        have_torch_input = False
        args_list = list(args)
        # Convert positional array-like arguments to itk images.
        for index, arg in enumerate(args):
            if _HAVE_XARRAY and isinstance(arg, xr.DataArray):
                have_xarray_input = True
                image = itk.image_from_xarray(arg)
                args_list[index] = image
            elif _HAVE_TORCH and isinstance(arg, torch.Tensor):
                have_torch_input = True
                channels = arg.shape[0]  # assume first dimension is channels
                arr = np.asarray(arg)
                if channels > 1:  # change from contiguous to interleaved channel order
                    arr = move_last_dimension_to_first(arr)
                image = itk.image_view_from_array(arr, is_vector=channels > 1)
                args_list[index] = image
            elif not isinstance(arg, itk.Object) and is_arraylike(arg):
                have_array_input = True
                array = np.asarray(arg)
                image = itk.image_view_from_array(array)
                args_list[index] = image
        # Keyword arguments are only converted when the key suggests an image
        # input, to avoid clobbering unrelated array-valued parameters.
        potential_image_input_kwargs = ("input", "input1", "input2", "input3")
        for key, value in kwargs.items():
            if key.lower() in potential_image_input_kwargs or "image" in key.lower():
                if _HAVE_XARRAY and isinstance(value, xr.DataArray):
                    have_xarray_input = True
                    image = itk.image_from_xarray(value)
                    kwargs[key] = image
                elif _HAVE_TORCH and isinstance(value, torch.Tensor):
                    have_torch_input = True
                    channels = value.shape[0]  # assume first dimension is channels
                    arr = np.asarray(value)
                    if (
                        channels > 1
                    ):  # change from contiguous to interleaved channel order
                        arr = move_last_dimension_to_first(arr)
                    image = itk.image_view_from_array(arr, is_vector=channels > 1)
                    kwargs[key] = image
                elif not isinstance(value, itk.Object) and is_arraylike(value):
                    have_array_input = True
                    array = np.asarray(value)
                    image = itk.image_view_from_array(array)
                    kwargs[key] = image
        if have_xarray_input or have_torch_input or have_array_input:
            # Convert output itk.Image's to numpy.ndarray's
            output = image_filter(*tuple(args_list), **kwargs)
            if isinstance(output, tuple):
                # Convert each itk.Image element; non-image elements pass
                # through unchanged.
                output_list = list(output)
                for index, value in enumerate(output_list):
                    if isinstance(value, itk.Image):
                        if have_xarray_input:
                            data_array = itk.xarray_from_image(value)
                            output_list[index] = data_array
                        elif have_torch_input:
                            channels = value.GetNumberOfComponentsPerPixel()
                            data_array = itk.array_view_from_image(value)
                            if (
                                channels > 1
                            ):  # change from interleaved to contiguous channel order
                                data_array = move_first_dimension_to_last(data_array)
                            torch_tensor = torch.from_numpy(data_array)
                            output_list[index] = torch_tensor
                        else:
                            array = itk.array_view_from_image(value)
                            output_list[index] = array
                return tuple(output_list)
            else:
                if isinstance(output, itk.Image):
                    if have_xarray_input:
                        output = itk.xarray_from_image(output)
                    elif have_torch_input:
                        channels = output.GetNumberOfComponentsPerPixel()
                        output = itk.array_view_from_image(output)
                        if (
                            channels > 1
                        ):  # change from interleaved to contiguous channel order
                            output = move_first_dimension_to_last(output)
                        output = torch.from_numpy(output)
                    else:
                        output = itk.array_view_from_image(output)
                return output
        else:
            # No wrapper types seen: call through untouched.
            return image_filter(*args, **kwargs)

    return image_filter_wrapper
def wasm_type_from_image_type(itkimage):  # noqa: C901
    """Describe an itk.Image's pixel/component type in itk-wasm terms.

    Args:
        itkimage: An instantiated itk.Image.

    Returns:
        Either a ``(componentType, pixelType)`` string tuple for the
        special-cased component types (itk.UL, itk.SL, complex, Offset), or
        a dict with ``dimension``, ``componentType``, ``pixelType`` and
        ``components`` entries.

    Raises:
        RuntimeError: If the component type is not recognized.
    """
    import itk

    component = itk.template(itkimage)[1][0]
    # itk.UL / itk.SL widths are platform dependent: 32-bit on Windows
    # ("nt"), 64-bit elsewhere.
    if component == itk.UL:
        if os.name == "nt":
            return "uint32", "Scalar"
        else:
            return "uint64", "Scalar"
    mangle = None
    pixelType = "Scalar"
    if component == itk.SL:
        if os.name == "nt":
            return "int32", "Scalar"
        else:
            return "int64", "Scalar"
    if component in (
        itk.SC,
        itk.UC,
        itk.SS,
        itk.US,
        itk.SI,
        itk.UI,
        itk.F,
        itk.D,
        itk.B,
        itk.SL,
        itk.SLL,
        itk.UL,
        itk.ULL,
    ):
        mangle = component
    elif component in [i[1] for i in itk.Vector.items()]:
        mangle = itk.template(component)[1][0]
        pixelType = "Vector"
    elif component == itk.complex[itk.F]:
        return "float32", "Complex"
    elif component == itk.complex[itk.D]:
        return "float64", "Complex"
    elif component in [i[1] for i in itk.CovariantVector.items()]:
        mangle = itk.template(component)[1][0]
        # Fix: this was previously the one-element tuple ("CovariantVector",),
        # which broke the string-keyed prefix lookup performed by
        # image_type_from_wasm_type when round-tripping.
        pixelType = "CovariantVector"
    elif component in [i[1] for i in itk.Offset.items()]:
        return "int64", "Offset"
    elif component in [i[1] for i in itk.FixedArray.items()]:
        mangle = itk.template(component)[1][0]
        pixelType = "FixedArray"
    elif component in [i[1] for i in itk.RGBAPixel.items()]:
        mangle = itk.template(component)[1][0]
        pixelType = "RGBA"
    elif component in [i[1] for i in itk.RGBPixel.items()]:
        mangle = itk.template(component)[1][0]
        pixelType = "RGB"
    elif component in [i[1] for i in itk.SymmetricSecondRankTensor.items()]:
        # SymmetricSecondRankTensor
        mangle = itk.template(component)[1][0]
        # Fix: this was previously misspelled "SymmetrySecondRankTensor",
        # which does not match the "SymmetricSecondRankTensor" key used by
        # image_type_from_wasm_type.
        pixelType = "SymmetricSecondRankTensor"
    else:
        raise RuntimeError(f"Unrecognized component type: {str(component)}")

    def _long_type():
        # JavaScript-facing width of the C "long" type on this platform.
        if os.name == "nt":
            return "int32"
        else:
            return "int64"

    _python_to_js = {
        itk.SC: "int8",
        itk.UC: "uint8",
        itk.SS: "int16",
        itk.US: "uint16",
        itk.SI: "int32",
        itk.UI: "uint32",
        itk.F: "float32",
        itk.D: "float64",
        itk.B: "uint8",
        itk.SL: _long_type(),
        itk.UL: "u" + _long_type(),
        itk.SLL: "int64",
        itk.ULL: "uint64",
    }
    imageType = dict(
        dimension=itkimage.GetImageDimension(),
        componentType=_python_to_js[mangle],
        pixelType=pixelType,
        components=itkimage.GetNumberOfComponentsPerPixel(),
    )
    return imageType
def image_type_from_wasm_type(jstype):
    """Resolve an itk.Image specialization from an itk-wasm type description.

    Args:
        jstype: Dict with "pixelType", "componentType" and "dimension" keys,
            as produced by wasm_type_from_image_type.

    Returns:
        The itk.Image attribute named by the assembled prefix string.
        NOTE(review): the "Complex" branch instead returns a
        ``(itk.Image[...], numpy dtype)`` tuple while the fall-through path
        returns only the image class -- confirm callers handle both shapes.
    """
    import itk

    _pixelType_to_prefix = {
        "Scalar": "",
        "RGB": "RGB",
        "RGBA": "RGBA",
        "Offset": "O",
        "Vector": "V",
        "CovariantVector": "CV",
        "SymmetricSecondRankTensor": "SSRT",
        "FixedArray": "FA",
    }
    pixelType = jstype["pixelType"]
    dimension = jstype["dimension"]
    if pixelType == "Complex":
        if jstype["componentType"] == "float32":
            return itk.Image[itk.complex, itk.F], np.float32
        else:
            return itk.Image[itk.complex, itk.D], np.float64

    def _long_type():
        # Mangled suffix for the 64-bit integer type on this platform.
        if os.name == "nt":
            return "LL"
        else:
            return "L"

    prefix = _pixelType_to_prefix[pixelType]
    _js_to_python = {
        "int8": "SC",
        "uint8": "UC",
        "int16": "SS",
        "uint16": "US",
        "int32": "SI",
        "uint32": "UI",
        "int64": "S" + _long_type(),
        "uint64": "U" + _long_type(),
        "float32": "F",
        "float64": "D",
    }
    if pixelType != "Offset":
        prefix += _js_to_python[jstype["componentType"]]
    # For non-scalar pixel types the dimension is appended twice -- presumably
    # once for the pixel's component dimension and once for the image
    # dimension, matching itk's wrapped class naming (e.g. itk.Image.VF33);
    # TODO(review): confirm against the itk wrapping naming scheme.
    if pixelType not in ("Scalar", "RGB", "RGBA", "Complex"):
        prefix += str(dimension)
    prefix += str(dimension)
    return getattr(itk.Image, prefix)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
# Reference implementation of depthwise_conv2d
def ReferenceDepthwiseConv2D(input_tensor, filter_tensor, strides, padding,
                             data_format=None):
  """Reference depthwise conv2d built from per-channel regular convolutions.

  Slices the input and filter along the input-channel axis, runs a regular
  conv2d per channel, and concatenates the per-channel results back along
  the channel axis.
  """
  # Channel axis is 1 for NCHW, 3 for NHWC (the default).
  channel_axis = 1 if data_format == "NCHW" else 3
  # Filters are laid out as [H, W, InC, DepthMultiplier].
  in_channels = filter_tensor.shape[2]
  convs = []
  for channel in xrange(in_channels):
    # Take the single-channel slice of the input for this channel.
    if data_format == "NCHW":
      input_slice = input_tensor[:, channel:channel+1, :, :]
    else:
      input_slice = input_tensor[:, :, :, channel:channel+1]
    filter_slice = filter_tensor[:, :, channel:channel+1, :]
    convs.append(
        nn_ops.conv2d(
            input_slice,
            filter_slice,
            strides,
            padding,
            data_format=data_format,
            name="depthwise_slice_%d" % channel))
  return array_ops.concat(convs, channel_axis)
def ConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # Each entry: (input_size, filter_size, out_size, stride, padding).
  configs = (
      ([4, 5, 5, 48], [1, 1, 48, 2], [4, 5, 5, 96], 1, "SAME"),
      ([4, 8, 8, 84], [1, 3, 84, 1], [4, 8, 8, 84], 1, "SAME"),
      ([4, 17, 17, 48], [3, 1, 48, 4], [4, 17, 17, 192], 1, "SAME"),
      ([4, 9, 27, 8], [3, 3, 8, 1], [4, 9, 27, 8], 1, "SAME"),
      ([4, 31, 31, 7], [3, 3, 7, 1], [4, 31, 31, 7], 1, "SAME"),
      ([4, 35, 35, 2], [5, 5, 2, 1], [4, 35, 35, 2], 1, "SAME"),
      ([4, 147, 147, 2], [3, 3, 2, 8], [4, 49, 49, 16], 3, "VALID"),
      ([3, 299, 299, 3], [2, 2, 3, 8], [3, 150, 150, 24], 2, "SAME"),
      ([5, 183, 183, 1], [5, 5, 1, 2], [5, 92, 92, 2], 2, "SAME"),
  )
  for config in configs:
    yield config
def CheckGradConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  compute_gradient_error() is very expensive. So the configs should be
  relatively small.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # Each entry: (input_size, filter_size, out_size, stride, padding).
  configs = (
      ([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2], 1, "SAME"),
      ([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, "VALID"),
      ([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4], 1, "SAME"),
      ([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2], 1, "SAME"),
      ([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, "VALID"),
  )
  for config in configs:
    yield config
class DepthwiseConv2DTest(xla_test.XLATestCase):
# This is testing that depthwise_conv2d and depthwise_conv2d_native
# produce the same results. It also tests that NCHW and NWHC
# formats agree, by comparing the depthwise_conv2d_native with
# 'NCHW' format (with transposition) matches the 'NHWC' format using
# the higher level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
data_type,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
data_type: The data type to use.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=data_type).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=data_type).reshape(filter_in_sizes)
with self.cached_session() as sess:
if data_type == np.float32:
tolerance = 1e-4
else:
self.assertEqual(data_type, np.float64)
tolerance = 1e-8
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=data_type)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=data_type)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
# Transpose from NWHC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
with ops.device("CPU"):
conv_interface = ReferenceDepthwiseConv2D(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native, {t1: x1, t2: x2})
interface_result = sess.run(conv_interface, {t1: x1, t2: x2})
print("data_type:", data_type, "max diff = ",
np.amax(np.absolute(native_result - interface_result)))
self.assertAllClose(
np.ravel(native_result), np.ravel(interface_result), rtol=tolerance)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2D,", index, "th config:", input_size, "*",
filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size, filter_size, stride, padding, data_type)
def testDepthwiseConv2DFormat(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFormat,", index, "th config:", input_size,
"*", filter_size, "stride:", stride, "padding:", padding)
for data_type in self.float_types:
# TODO(phawkins): the reference implementation only supports float32.
if data_type == np.float32:
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
data_type,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = np.array([f * 1.0 for f in range(1, total_size_1 + 1)],
dtype=np.float32).reshape(tensor_in_sizes)
x2 = np.array([f * 1.0 for f in range(1, total_size_2 + 1)],
dtype=np.float32).reshape(filter_in_sizes)
with self.cached_session() as sess:
t1 = array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32)
t2 = array_ops.placeholder(shape=filter_in_sizes, dtype=np.float32)
with self.test_scope():
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv, {t1: x1, t2: x2})
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-4)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.cached_session():
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = array_ops.placeholder(np.float32, shape=filter_sizes)
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
if use_xla:
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
else:
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval({t1: x1, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-3, atol=1e-3)
def testDepthwiseConv2DInputGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DInputGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropInput(input_size, filter_size, output_size, stride,
padding)
def _CompareBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
stride, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(use_xla):
with self.cached_session():
t0 = array_ops.placeholder(np.float32, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
if use_xla:
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
else:
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval({t0: x0, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
gpu_value = _GetVal(use_xla=True)
cpu_value = _GetVal(use_xla=False)
self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
def testDepthwiseConv2DFilterGradCompare(self):
for index, (input_size, filter_size, output_size, stride,
padding) in enumerate(ConfigsToTest()):
print("Testing DepthwiseConv2DFilterGradCompare,", index, "th config:",
input_size, "*", filter_size, "stride:", stride, "padding:",
padding)
self._CompareBackpropFilter(input_size, filter_size, output_size,
stride, padding)
if __name__ == "__main__":
test.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.